Diffstat (limited to 'net/sctp/outqueue.c')
-rw-r--r-- | net/sctp/outqueue.c | 1734 |
1 file changed, 1734 insertions, 0 deletions
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
new file mode 100644
index 000000000000..1b2d4adc4ddb
--- /dev/null
+++ b/net/sctp/outqueue.c
@@ -0,0 +1,1734 @@
1 | /* SCTP kernel reference Implementation | ||
2 | * (C) Copyright IBM Corp. 2001, 2004 | ||
3 | * Copyright (c) 1999-2000 Cisco, Inc. | ||
4 | * Copyright (c) 1999-2001 Motorola, Inc. | ||
5 | * Copyright (c) 2001-2003 Intel Corp. | ||
6 | * | ||
7 | * This file is part of the SCTP kernel reference Implementation | ||
8 | * | ||
9 | * These functions implement the sctp_outq class. The outqueue handles | ||
10 | * bundling and queueing of outgoing SCTP chunks. | ||
11 | * | ||
12 | * The SCTP reference implementation is free software; | ||
13 | * you can redistribute it and/or modify it under the terms of | ||
14 | * the GNU General Public License as published by | ||
15 | * the Free Software Foundation; either version 2, or (at your option) | ||
16 | * any later version. | ||
17 | * | ||
18 | * The SCTP reference implementation is distributed in the hope that it | ||
19 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | ||
20 | * ************************ | ||
21 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
22 | * See the GNU General Public License for more details. | ||
23 | * | ||
24 | * You should have received a copy of the GNU General Public License | ||
25 | * along with GNU CC; see the file COPYING. If not, write to | ||
26 | * the Free Software Foundation, 59 Temple Place - Suite 330, | ||
27 | * Boston, MA 02111-1307, USA. | ||
28 | * | ||
29 | * Please send any bug reports or fixes you make to the | ||
30 | * email address(es): | ||
31 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | ||
32 | * | ||
33 | * Or submit a bug report through the following website: | ||
34 | * http://www.sf.net/projects/lksctp | ||
35 | * | ||
36 | * Written or modified by: | ||
37 | * La Monte H.P. Yarroll <piggy@acm.org> | ||
38 | * Karl Knutson <karl@athena.chicago.il.us> | ||
39 | * Perry Melange <pmelange@null.cc.uic.edu> | ||
40 | * Xingang Guo <xingang.guo@intel.com> | ||
41 | * Hui Huang <hui.huang@nokia.com> | ||
42 | * Sridhar Samudrala <sri@us.ibm.com> | ||
43 | * Jon Grimm <jgrimm@us.ibm.com> | ||
44 | * | ||
45 | * Any bugs reported given to us we will try to fix... any fixes shared will | ||
46 | * be incorporated into the next SCTP release. | ||
47 | */ | ||
48 | |||
49 | #include <linux/types.h> | ||
50 | #include <linux/list.h> /* For struct list_head */ | ||
51 | #include <linux/socket.h> | ||
52 | #include <linux/ip.h> | ||
53 | #include <net/sock.h> /* For skb_set_owner_w */ | ||
54 | |||
55 | #include <net/sctp/sctp.h> | ||
56 | #include <net/sctp/sm.h> | ||
57 | |||
58 | /* Declare internal functions here. */ | ||
59 | static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn); | ||
60 | static void sctp_check_transmitted(struct sctp_outq *q, | ||
61 | struct list_head *transmitted_queue, | ||
62 | struct sctp_transport *transport, | ||
63 | struct sctp_sackhdr *sack, | ||
64 | __u32 highest_new_tsn); | ||
65 | |||
66 | static void sctp_mark_missing(struct sctp_outq *q, | ||
67 | struct list_head *transmitted_queue, | ||
68 | struct sctp_transport *transport, | ||
69 | __u32 highest_new_tsn, | ||
70 | int count_of_newacks); | ||
71 | |||
72 | static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn); | ||
73 | |||
74 | /* Add data to the front of the queue. */ | ||
75 | static inline void sctp_outq_head_data(struct sctp_outq *q, | ||
76 | struct sctp_chunk *ch) | ||
77 | { | ||
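| /* struct sctp_chunk deliberately begins with the same three | ||
| * fields as struct sk_buff, so the skb queue helpers can manage | ||
| * chunk lists directly; that layout is what makes this cast safe. | ||
| */ | ||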
78 | __skb_queue_head(&q->out, (struct sk_buff *)ch); | ||
79 | q->out_qlen += ch->skb->len; | ||
80 | return; | ||
81 | } | ||
82 | |||
83 | /* Take data from the front of the queue. */ | ||
84 | static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q) | ||
85 | { | ||
86 | struct sctp_chunk *ch; | ||
87 | ch = (struct sctp_chunk *)__skb_dequeue(&q->out); | ||
88 | if (ch) | ||
89 | q->out_qlen -= ch->skb->len; | ||
90 | return ch; | ||
91 | } | ||
92 | /* Add data chunk to the end of the queue. */ | ||
93 | static inline void sctp_outq_tail_data(struct sctp_outq *q, | ||
94 | struct sctp_chunk *ch) | ||
95 | { | ||
96 | __skb_queue_tail(&q->out, (struct sk_buff *)ch); | ||
97 | q->out_qlen += ch->skb->len; | ||
98 | return; | ||
99 | } | ||
100 | |||
101 | /* | ||
102 | * SFR-CACC algorithm: | ||
103 | * D) If count_of_newacks is greater than or equal to 2 | ||
104 | * and t was not sent to the current primary then the | ||
105 | * sender MUST NOT increment missing report count for t. | ||
106 | */ | ||
107 | static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary, | ||
108 | struct sctp_transport *transport, | ||
109 | int count_of_newacks) | ||
110 | { | ||
111 | if (count_of_newacks >= 2 && transport != primary) | ||
112 | return 1; | ||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | /* | ||
117 | * SFR-CACC algorithm: | ||
118 | * F) If count_of_newacks is less than 2, let d be the | ||
119 | * destination to which t was sent. If cacc_saw_newack | ||
120 | * is 0 for destination d, then the sender MUST NOT | ||
121 | * increment missing report count for t. | ||
122 | */ | ||
123 | static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport, | ||
124 | int count_of_newacks) | ||
125 | { | ||
126 | if (count_of_newacks < 2 && !transport->cacc.cacc_saw_newack) | ||
127 | return 1; | ||
128 | return 0; | ||
129 | } | ||
130 | |||
131 | /* | ||
132 | * SFR-CACC algorithm: | ||
133 | * 3.1) If CYCLING_CHANGEOVER is 0, the sender SHOULD | ||
134 | * execute steps C, D, F. | ||
135 | * | ||
136 | * C has been implemented in sctp_outq_sack | ||
137 | */ | ||
138 | static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary, | ||
139 | struct sctp_transport *transport, | ||
140 | int count_of_newacks) | ||
141 | { | ||
142 | if (!primary->cacc.cycling_changeover) { | ||
143 | if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks)) | ||
144 | return 1; | ||
145 | if (sctp_cacc_skip_3_1_f(transport, count_of_newacks)) | ||
146 | return 1; | ||
147 | return 0; | ||
148 | } | ||
149 | return 0; | ||
150 | } | ||
151 | |||
152 | /* | ||
153 | * SFR-CACC algorithm: | ||
154 | * 3.2) Else if CYCLING_CHANGEOVER is 1, and t is less | ||
155 | * than next_tsn_at_change of the current primary, then | ||
156 | * the sender MUST NOT increment missing report count | ||
157 | * for t. | ||
158 | */ | ||
159 | static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn) | ||
160 | { | ||
161 | if (primary->cacc.cycling_changeover && | ||
162 | TSN_lt(tsn, primary->cacc.next_tsn_at_change)) | ||
163 | return 1; | ||
164 | return 0; | ||
165 | } | ||
166 | |||
167 | /* | ||
168 | * SFR-CACC algorithm: | ||
169 | * 3) If the missing report count for TSN t is to be | ||
170 | * incremented according to [RFC2960] and | ||
171 | * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set, | ||
172 | * then the sender MUST further execute steps 3.1 and | ||
173 | * 3.2 to determine if the missing report count for | ||
174 | * TSN t SHOULD NOT be incremented. | ||
175 | * | ||
176 | * 3.3) If 3.1 and 3.2 do not dictate that the missing | ||
177 | * report count for t should not be incremented, then | ||
178 | * the sender SHOULD increment missing report count for | ||
179 | * t (according to [RFC2960] and [SCTP_STEWART-2002]). | ||
180 | */ | ||
181 | static inline int sctp_cacc_skip(struct sctp_transport *primary, | ||
182 | struct sctp_transport *transport, | ||
183 | int count_of_newacks, | ||
184 | __u32 tsn) | ||
185 | { | ||
186 | if (primary->cacc.changeover_active && | ||
187 | (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) | ||
188 | || sctp_cacc_skip_3_2(primary, tsn))) | ||
189 | return 1; | ||
190 | return 0; | ||
191 | } | ||
192 | |||
193 | /* Initialize an existing sctp_outq. This does the boring stuff. | ||
194 | * You still need to define handlers if you really want to DO | ||
195 | * something with this structure... | ||
196 | */ | ||
197 | void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q) | ||
198 | { | ||
199 | q->asoc = asoc; | ||
200 | skb_queue_head_init(&q->out); | ||
201 | skb_queue_head_init(&q->control); | ||
202 | INIT_LIST_HEAD(&q->retransmit); | ||
203 | INIT_LIST_HEAD(&q->sacked); | ||
204 | INIT_LIST_HEAD(&q->abandoned); | ||
205 | |||
206 | q->outstanding_bytes = 0; | ||
207 | q->empty = 1; | ||
208 | q->cork = 0; | ||
209 | |||
210 | q->malloced = 0; | ||
211 | q->out_qlen = 0; | ||
212 | } | ||
213 | |||
214 | /* Free the outqueue structure and any related pending chunks. | ||
215 | */ | ||
216 | void sctp_outq_teardown(struct sctp_outq *q) | ||
217 | { | ||
218 | struct sctp_transport *transport; | ||
219 | struct list_head *lchunk, *pos, *temp; | ||
220 | struct sctp_chunk *chunk; | ||
221 | |||
222 | /* Throw away unacknowledged chunks. */ | ||
223 | list_for_each(pos, &q->asoc->peer.transport_addr_list) { | ||
224 | transport = list_entry(pos, struct sctp_transport, transports); | ||
225 | while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) { | ||
226 | chunk = list_entry(lchunk, struct sctp_chunk, | ||
227 | transmitted_list); | ||
228 | /* Mark as part of a failed message. */ | ||
229 | sctp_chunk_fail(chunk, q->error); | ||
230 | sctp_chunk_free(chunk); | ||
231 | } | ||
232 | } | ||
233 | |||
234 | /* Throw away chunks that have been gap ACKed. */ | ||
235 | list_for_each_safe(lchunk, temp, &q->sacked) { | ||
236 | list_del_init(lchunk); | ||
237 | chunk = list_entry(lchunk, struct sctp_chunk, | ||
238 | transmitted_list); | ||
239 | sctp_chunk_fail(chunk, q->error); | ||
240 | sctp_chunk_free(chunk); | ||
241 | } | ||
242 | |||
243 | /* Throw away any chunks in the retransmit queue. */ | ||
244 | list_for_each_safe(lchunk, temp, &q->retransmit) { | ||
245 | list_del_init(lchunk); | ||
246 | chunk = list_entry(lchunk, struct sctp_chunk, | ||
247 | transmitted_list); | ||
248 | sctp_chunk_fail(chunk, q->error); | ||
249 | sctp_chunk_free(chunk); | ||
250 | } | ||
251 | |||
252 | /* Throw away any chunks that are in the abandoned queue. */ | ||
253 | list_for_each_safe(lchunk, temp, &q->abandoned) { | ||
254 | list_del_init(lchunk); | ||
255 | chunk = list_entry(lchunk, struct sctp_chunk, | ||
256 | transmitted_list); | ||
257 | sctp_chunk_fail(chunk, q->error); | ||
258 | sctp_chunk_free(chunk); | ||
259 | } | ||
260 | |||
261 | /* Throw away any leftover data chunks. */ | ||
262 | while ((chunk = sctp_outq_dequeue_data(q)) != NULL) { | ||
263 | |||
264 | /* Mark as send failure. */ | ||
265 | sctp_chunk_fail(chunk, q->error); | ||
266 | sctp_chunk_free(chunk); | ||
267 | } | ||
268 | |||
269 | q->error = 0; | ||
270 | |||
271 | /* Throw away any leftover control chunks. */ | ||
272 | while ((chunk = (struct sctp_chunk *) skb_dequeue(&q->control)) != NULL) | ||
273 | sctp_chunk_free(chunk); | ||
274 | } | ||
275 | |||
276 | /* Free the outqueue structure and any related pending chunks. */ | ||
277 | void sctp_outq_free(struct sctp_outq *q) | ||
278 | { | ||
279 | /* Throw away leftover chunks. */ | ||
280 | sctp_outq_teardown(q); | ||
281 | |||
282 | /* If we were kmalloc()'d, free the memory. */ | ||
283 | if (q->malloced) | ||
284 | kfree(q); | ||
285 | } | ||
286 | |||
287 | /* Put a new chunk in an sctp_outq. */ | ||
288 | int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk) | ||
289 | { | ||
290 | int error = 0; | ||
291 | |||
292 | SCTP_DEBUG_PRINTK("sctp_outq_tail(%p, %p[%s])\n", | ||
293 | q, chunk, chunk && chunk->chunk_hdr ? | ||
294 | sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) | ||
295 | : "Illegal Chunk"); | ||
296 | |||
297 | /* If it is data, queue it up, otherwise, send it | ||
298 | * immediately. | ||
299 | */ | ||
300 | if (SCTP_CID_DATA == chunk->chunk_hdr->type) { | ||
301 | /* Is it OK to queue data chunks? */ | ||
302 | /* From 9. Termination of Association | ||
303 | * | ||
304 | * When either endpoint performs a shutdown, the | ||
305 | * association on each peer will stop accepting new | ||
306 | * data from its user and only deliver data in queue | ||
307 | * at the time of sending or receiving the SHUTDOWN | ||
308 | * chunk. | ||
309 | */ | ||
310 | switch (q->asoc->state) { | ||
311 | case SCTP_STATE_EMPTY: | ||
312 | case SCTP_STATE_CLOSED: | ||
313 | case SCTP_STATE_SHUTDOWN_PENDING: | ||
314 | case SCTP_STATE_SHUTDOWN_SENT: | ||
315 | case SCTP_STATE_SHUTDOWN_RECEIVED: | ||
316 | case SCTP_STATE_SHUTDOWN_ACK_SENT: | ||
317 | /* Cannot send after transport endpoint shutdown */ | ||
318 | error = -ESHUTDOWN; | ||
319 | break; | ||
320 | |||
321 | default: | ||
322 | SCTP_DEBUG_PRINTK("outqueueing (%p, %p[%s])\n", | ||
323 | q, chunk, chunk && chunk->chunk_hdr ? | ||
324 | sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) | ||
325 | : "Illegal Chunk"); | ||
326 | |||
327 | sctp_outq_tail_data(q, chunk); | ||
328 | if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) | ||
329 | SCTP_INC_STATS(SCTP_MIB_OUTUNORDERCHUNKS); | ||
330 | else | ||
331 | SCTP_INC_STATS(SCTP_MIB_OUTORDERCHUNKS); | ||
332 | q->empty = 0; | ||
333 | break; | ||
334 | } | ||
335 | } else { | ||
336 | __skb_queue_tail(&q->control, (struct sk_buff *) chunk); | ||
337 | SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); | ||
338 | } | ||
339 | |||
340 | if (error < 0) | ||
341 | return error; | ||
342 | |||
343 | if (!q->cork) | ||
344 | error = sctp_outq_flush(q, 0); | ||
345 | |||
346 | return error; | ||
347 | } | ||
348 | |||
349 | /* Insert a chunk into the sorted list based on the TSNs. The retransmit list | ||
350 | * and the abandoned list are in ascending order. | ||
351 | */ | ||
352 | static void sctp_insert_list(struct list_head *head, struct list_head *new) | ||
353 | { | ||
354 | struct list_head *pos; | ||
355 | struct sctp_chunk *nchunk, *lchunk; | ||
356 | __u32 ntsn, ltsn; | ||
357 | int done = 0; | ||
358 | |||
359 | nchunk = list_entry(new, struct sctp_chunk, transmitted_list); | ||
360 | ntsn = ntohl(nchunk->subh.data_hdr->tsn); | ||
361 | |||
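| /* Walk the list until a chunk with a larger TSN is found; | ||
| * list_add(new, pos->prev) then links the new chunk in just | ||
| * before pos, keeping the list in ascending TSN order. | ||
| */ | ||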
362 | list_for_each(pos, head) { | ||
363 | lchunk = list_entry(pos, struct sctp_chunk, transmitted_list); | ||
364 | ltsn = ntohl(lchunk->subh.data_hdr->tsn); | ||
365 | if (TSN_lt(ntsn, ltsn)) { | ||
366 | list_add(new, pos->prev); | ||
367 | done = 1; | ||
368 | break; | ||
369 | } | ||
370 | } | ||
371 | if (!done) | ||
372 | list_add_tail(new, head); | ||
373 | } | ||
374 | |||
375 | /* Mark all the eligible packets on a transport for retransmission. */ | ||
376 | void sctp_retransmit_mark(struct sctp_outq *q, | ||
377 | struct sctp_transport *transport, | ||
378 | __u8 fast_retransmit) | ||
379 | { | ||
380 | struct list_head *lchunk, *ltemp; | ||
381 | struct sctp_chunk *chunk; | ||
382 | |||
383 | /* Walk through the specified transmitted queue. */ | ||
384 | list_for_each_safe(lchunk, ltemp, &transport->transmitted) { | ||
385 | chunk = list_entry(lchunk, struct sctp_chunk, | ||
386 | transmitted_list); | ||
387 | |||
388 | /* If the chunk is abandoned, move it to abandoned list. */ | ||
389 | if (sctp_chunk_abandoned(chunk)) { | ||
390 | list_del_init(lchunk); | ||
391 | sctp_insert_list(&q->abandoned, lchunk); | ||
392 | continue; | ||
393 | } | ||
394 | |||
395 | /* If we are doing retransmission due to a fast retransmit, | ||
396 | * only the chunks that are marked for fast retransmit | ||
397 | * should be added to the retransmit queue. If we are doing | ||
398 | * retransmission due to a timeout or pmtu discovery, only the | ||
399 | * chunks that are not yet acked should be added to the | ||
400 | * retransmit queue. | ||
401 | */ | ||
402 | if ((fast_retransmit && chunk->fast_retransmit) || | ||
403 | (!fast_retransmit && !chunk->tsn_gap_acked)) { | ||
404 | /* RFC 2960 6.2.1 Processing a Received SACK | ||
405 | * | ||
406 | * C) Any time a DATA chunk is marked for | ||
407 | * retransmission (via either T3-rtx timer expiration | ||
408 | * (Section 6.3.3) or via fast retransmit | ||
409 | * (Section 7.2.4)), add the data size of those | ||
410 | * chunks to the rwnd. | ||
411 | */ | ||
412 | q->asoc->peer.rwnd += sctp_data_size(chunk); | ||
413 | q->outstanding_bytes -= sctp_data_size(chunk); | ||
414 | transport->flight_size -= sctp_data_size(chunk); | ||
415 | |||
416 | /* sctpimpguide-05 Section 2.8.2 | ||
417 | * M5) If a T3-rtx timer expires, the | ||
418 | * 'TSN.Missing.Report' of all affected TSNs is set | ||
419 | * to 0. | ||
420 | */ | ||
421 | chunk->tsn_missing_report = 0; | ||
422 | |||
423 | /* If a chunk that is being used for RTT measurement | ||
424 | * has to be retransmitted, we cannot use this chunk | ||
425 | * anymore for RTT measurements. Reset rto_pending so | ||
426 | * that a new RTT measurement is started when a new | ||
427 | * data chunk is sent. | ||
428 | */ | ||
429 | if (chunk->rtt_in_progress) { | ||
430 | chunk->rtt_in_progress = 0; | ||
431 | transport->rto_pending = 0; | ||
432 | } | ||
433 | |||
434 | /* Move the chunk to the retransmit queue. The chunks | ||
435 | * on the retransmit queue are always kept in order. | ||
436 | */ | ||
437 | list_del_init(lchunk); | ||
438 | sctp_insert_list(&q->retransmit, lchunk); | ||
439 | } | ||
440 | } | ||
441 | |||
442 | SCTP_DEBUG_PRINTK("%s: transport: %p, fast_retransmit: %d, " | ||
443 | "cwnd: %d, ssthresh: %d, flight_size: %d, " | ||
444 | "pba: %d\n", __FUNCTION__, | ||
445 | transport, fast_retransmit, | ||
446 | transport->cwnd, transport->ssthresh, | ||
447 | transport->flight_size, | ||
448 | transport->partial_bytes_acked); | ||
449 | |||
450 | } | ||
451 | |||
452 | /* Mark all the eligible packets on a transport for retransmission and force | ||
453 | * one packet out. | ||
454 | */ | ||
455 | void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport, | ||
456 | sctp_retransmit_reason_t reason) | ||
457 | { | ||
458 | int error = 0; | ||
459 | __u8 fast_retransmit = 0; | ||
460 | |||
461 | switch(reason) { | ||
462 | case SCTP_RTXR_T3_RTX: | ||
463 | sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX); | ||
464 | /* Update the retran path if the T3-rtx timer has expired for | ||
465 | * the current retran path. | ||
466 | */ | ||
467 | if (transport == transport->asoc->peer.retran_path) | ||
468 | sctp_assoc_update_retran_path(transport->asoc); | ||
469 | break; | ||
470 | case SCTP_RTXR_FAST_RTX: | ||
471 | sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX); | ||
472 | fast_retransmit = 1; | ||
473 | break; | ||
474 | case SCTP_RTXR_PMTUD: | ||
475 | default: | ||
476 | break; | ||
477 | } | ||
478 | |||
479 | sctp_retransmit_mark(q, transport, fast_retransmit); | ||
480 | |||
481 | /* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination, | ||
482 | * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by | ||
483 | * following the procedures outlined in C1 - C5. | ||
484 | */ | ||
485 | sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point); | ||
486 | |||
487 | error = sctp_outq_flush(q, /* rtx_timeout */ 1); | ||
488 | |||
489 | if (error) | ||
490 | q->asoc->base.sk->sk_err = -error; | ||
491 | } | ||
492 | |||
493 | /* | ||
494 | * Transmit DATA chunks on the retransmit queue. Upon return from | ||
495 | * sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which | ||
496 | * need to be transmitted by the caller. | ||
497 | * We assume that pkt->transport has already been set. | ||
498 | * | ||
499 | * The return value is a normal kernel error return value. | ||
500 | */ | ||
501 | static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt, | ||
502 | int rtx_timeout, int *start_timer) | ||
503 | { | ||
504 | struct list_head *lqueue; | ||
505 | struct list_head *lchunk, *lchunk1; | ||
506 | struct sctp_transport *transport = pkt->transport; | ||
507 | sctp_xmit_t status; | ||
508 | struct sctp_chunk *chunk, *chunk1; | ||
509 | struct sctp_association *asoc; | ||
510 | int error = 0; | ||
511 | |||
512 | asoc = q->asoc; | ||
513 | lqueue = &q->retransmit; | ||
514 | |||
515 | /* RFC 2960 6.3.3 Handle T3-rtx Expiration | ||
516 | * | ||
517 | * E3) Determine how many of the earliest (i.e., lowest TSN) | ||
518 | * outstanding DATA chunks for the address for which the | ||
519 | * T3-rtx has expired will fit into a single packet, subject | ||
520 | * to the MTU constraint for the path corresponding to the | ||
521 | * destination transport address to which the retransmission | ||
522 | * is being sent (this may be different from the address for | ||
523 | * which the timer expires [see Section 6.4]). Call this value | ||
524 | * K. Bundle and retransmit those K DATA chunks in a single | ||
525 | * packet to the destination endpoint. | ||
526 | * | ||
527 | * [Just to be painfully clear, if we are retransmitting | ||
528 | * because a timeout just happened, we should send only ONE | ||
529 | * packet of retransmitted data.] | ||
530 | */ | ||
531 | lchunk = sctp_list_dequeue(lqueue); | ||
532 | |||
533 | while (lchunk) { | ||
534 | chunk = list_entry(lchunk, struct sctp_chunk, | ||
535 | transmitted_list); | ||
536 | |||
537 | /* Make sure that Gap Acked TSNs are not retransmitted. A | ||
538 | * simple approach is just to move such TSNs out of the | ||
539 | * way and into a 'transmitted' queue and skip to the | ||
540 | * next chunk. | ||
541 | */ | ||
542 | if (chunk->tsn_gap_acked) { | ||
543 | list_add_tail(lchunk, &transport->transmitted); | ||
544 | lchunk = sctp_list_dequeue(lqueue); | ||
545 | continue; | ||
546 | } | ||
547 | |||
548 | /* Attempt to append this chunk to the packet. */ | ||
549 | status = sctp_packet_append_chunk(pkt, chunk); | ||
550 | |||
551 | switch (status) { | ||
552 | case SCTP_XMIT_PMTU_FULL: | ||
553 | /* Send this packet. */ | ||
554 | if ((error = sctp_packet_transmit(pkt)) == 0) | ||
555 | *start_timer = 1; | ||
556 | |||
557 | /* If we are retransmitting, we should only | ||
558 | * send a single packet. | ||
559 | */ | ||
560 | if (rtx_timeout) { | ||
561 | list_add(lchunk, lqueue); | ||
562 | lchunk = NULL; | ||
563 | } | ||
564 | |||
565 | /* Bundle lchunk in the next round. */ | ||
566 | break; | ||
567 | |||
568 | case SCTP_XMIT_RWND_FULL: | ||
569 | /* Send this packet. */ | ||
570 | if ((error = sctp_packet_transmit(pkt)) == 0) | ||
571 | *start_timer = 1; | ||
572 | |||
573 | /* Stop sending DATA as there is no more room | ||
574 | * at the receiver. | ||
575 | */ | ||
576 | list_add(lchunk, lqueue); | ||
577 | lchunk = NULL; | ||
578 | break; | ||
579 | |||
580 | case SCTP_XMIT_NAGLE_DELAY: | ||
581 | /* Send this packet. */ | ||
582 | if ((error = sctp_packet_transmit(pkt)) == 0) | ||
583 | *start_timer = 1; | ||
584 | |||
585 | /* Stop sending DATA because of Nagle delay. */ | ||
586 | list_add(lchunk, lqueue); | ||
587 | lchunk = NULL; | ||
588 | break; | ||
589 | |||
590 | default: | ||
591 | /* The append was successful, so add this chunk to | ||
592 | * the transmitted list. | ||
593 | */ | ||
594 | list_add_tail(lchunk, &transport->transmitted); | ||
595 | |||
596 | /* Mark the chunk as ineligible for fast retransmit | ||
597 | * after it is retransmitted. | ||
598 | */ | ||
599 | chunk->fast_retransmit = 0; | ||
600 | |||
601 | *start_timer = 1; | ||
602 | q->empty = 0; | ||
603 | |||
604 | /* Retrieve a new chunk to bundle. */ | ||
605 | lchunk = sctp_list_dequeue(lqueue); | ||
606 | break; | ||
607 | } | ||
608 | |||
609 | /* If we are here due to a retransmit timeout or a fast | ||
610 | * retransmit and if there are any chunks left in the retransmit | ||
611 | * queue that could not fit in the PMTU sized packet, they need | ||
| * to be marked as ineligible for a subsequent fast retransmit. | ||
612 | */ | ||
613 | if (rtx_timeout && !lchunk) { | ||
614 | list_for_each(lchunk1, lqueue) { | ||
615 | chunk1 = list_entry(lchunk1, struct sctp_chunk, | ||
616 | transmitted_list); | ||
617 | chunk1->fast_retransmit = 0; | ||
618 | } | ||
619 | } | ||
620 | } | ||
621 | |||
622 | return error; | ||
623 | } | ||
624 | |||
625 | /* Uncork the outqueue so that chunks queued while corked are flushed. */ | ||
626 | int sctp_outq_uncork(struct sctp_outq *q) | ||
627 | { | ||
628 | int error = 0; | ||
629 | if (q->cork) { | ||
630 | q->cork = 0; | ||
631 | error = sctp_outq_flush(q, 0); | ||
632 | } | ||
633 | return error; | ||
634 | } | ||
635 | |||
636 | /* | ||
637 | * Try to flush an outqueue. | ||
638 | * | ||
639 | * Description: Send everything in q which we legally can, subject to | ||
640 | * congestion limitations. | ||
641 | * Note: This function can be called from multiple contexts so appropriate | ||
642 | * locking concerns must be made. Today we use the sock lock to protect | ||
643 | * this function. | ||
644 | */ | ||
645 | int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout) | ||
646 | { | ||
647 | struct sctp_packet *packet; | ||
648 | struct sctp_packet singleton; | ||
649 | struct sctp_association *asoc = q->asoc; | ||
650 | __u16 sport = asoc->base.bind_addr.port; | ||
651 | __u16 dport = asoc->peer.port; | ||
652 | __u32 vtag = asoc->peer.i.init_tag; | ||
653 | struct sk_buff_head *queue; | ||
654 | struct sctp_transport *transport = NULL; | ||
655 | struct sctp_transport *new_transport; | ||
656 | struct sctp_chunk *chunk; | ||
657 | sctp_xmit_t status; | ||
658 | int error = 0; | ||
659 | int start_timer = 0; | ||
660 | |||
661 | /* These transports have chunks to send. */ | ||
662 | struct list_head transport_list; | ||
663 | struct list_head *ltransport; | ||
664 | |||
665 | INIT_LIST_HEAD(&transport_list); | ||
666 | packet = NULL; | ||
667 | |||
668 | /* | ||
669 | * 6.10 Bundling | ||
670 | * ... | ||
671 | * When bundling control chunks with DATA chunks, an | ||
672 | * endpoint MUST place control chunks first in the outbound | ||
673 | * SCTP packet. The transmitter MUST transmit DATA chunks | ||
674 | * within a SCTP packet in increasing order of TSN. | ||
675 | * ... | ||
676 | */ | ||
677 | |||
678 | queue = &q->control; | ||
679 | while ((chunk = (struct sctp_chunk *)skb_dequeue(queue)) != NULL) { | ||
680 | /* Pick the right transport to use. */ | ||
681 | new_transport = chunk->transport; | ||
682 | |||
683 | if (!new_transport) { | ||
684 | new_transport = asoc->peer.active_path; | ||
685 | } else if (!new_transport->active) { | ||
686 | /* If the chunk is Heartbeat or Heartbeat Ack, | ||
687 | * send it to chunk->transport, even if it's | ||
688 | * inactive. | ||
689 | * | ||
690 | * 3.3.6 Heartbeat Acknowledgement: | ||
691 | * ... | ||
692 | * A HEARTBEAT ACK is always sent to the source IP | ||
693 | * address of the IP datagram containing the | ||
694 | * HEARTBEAT chunk to which this ack is responding. | ||
695 | * ... | ||
696 | */ | ||
697 | if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT && | ||
698 | chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK) | ||
699 | new_transport = asoc->peer.active_path; | ||
700 | } | ||
701 | |||
702 | /* Are we switching transports? | ||
703 | * Take care of transport locks. | ||
704 | */ | ||
705 | if (new_transport != transport) { | ||
706 | transport = new_transport; | ||
707 | if (list_empty(&transport->send_ready)) { | ||
708 | list_add_tail(&transport->send_ready, | ||
709 | &transport_list); | ||
710 | } | ||
711 | packet = &transport->packet; | ||
712 | sctp_packet_config(packet, vtag, | ||
713 | asoc->peer.ecn_capable); | ||
714 | } | ||
715 | |||
716 | switch (chunk->chunk_hdr->type) { | ||
717 | /* | ||
718 | * 6.10 Bundling | ||
719 | * ... | ||
720 | * An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN | ||
721 | * COMPLETE with any other chunks. [Send them immediately.] | ||
722 | */ | ||
723 | case SCTP_CID_INIT: | ||
724 | case SCTP_CID_INIT_ACK: | ||
725 | case SCTP_CID_SHUTDOWN_COMPLETE: | ||
726 | sctp_packet_init(&singleton, transport, sport, dport); | ||
727 | sctp_packet_config(&singleton, vtag, 0); | ||
728 | sctp_packet_append_chunk(&singleton, chunk); | ||
729 | error = sctp_packet_transmit(&singleton); | ||
730 | if (error < 0) | ||
731 | return error; | ||
732 | break; | ||
733 | |||
734 | case SCTP_CID_ABORT: | ||
735 | case SCTP_CID_SACK: | ||
736 | case SCTP_CID_HEARTBEAT: | ||
737 | case SCTP_CID_HEARTBEAT_ACK: | ||
738 | case SCTP_CID_SHUTDOWN: | ||
739 | case SCTP_CID_SHUTDOWN_ACK: | ||
740 | case SCTP_CID_ERROR: | ||
741 | case SCTP_CID_COOKIE_ECHO: | ||
742 | case SCTP_CID_COOKIE_ACK: | ||
743 | case SCTP_CID_ECN_ECNE: | ||
744 | case SCTP_CID_ECN_CWR: | ||
745 | case SCTP_CID_ASCONF: | ||
746 | case SCTP_CID_ASCONF_ACK: | ||
747 | case SCTP_CID_FWD_TSN: | ||
748 | sctp_packet_transmit_chunk(packet, chunk); | ||
749 | break; | ||
750 | |||
751 | default: | ||
752 | /* We built a chunk with an illegal type! */ | ||
753 | BUG(); | ||
754 | } | ||
755 | } | ||
756 | |||
757 | /* Is it OK to send data chunks? */ | ||
758 | switch (asoc->state) { | ||
759 | case SCTP_STATE_COOKIE_ECHOED: | ||
760 | /* Only allow bundling when this packet has a COOKIE-ECHO | ||
761 | * chunk. | ||
762 | */ | ||
763 | if (!packet || !packet->has_cookie_echo) | ||
764 | break; | ||
765 | |||
766 | /* fallthru */ | ||
767 | case SCTP_STATE_ESTABLISHED: | ||
768 | case SCTP_STATE_SHUTDOWN_PENDING: | ||
769 | case SCTP_STATE_SHUTDOWN_RECEIVED: | ||
770 | /* | ||
771 | * RFC 2960 6.1 Transmission of DATA Chunks | ||
772 | * | ||
773 | * C) When the time comes for the sender to transmit, | ||
774 | * before sending new DATA chunks, the sender MUST | ||
775 | * first transmit any outstanding DATA chunks which | ||
776 | * are marked for retransmission (limited by the | ||
777 | * current cwnd). | ||
778 | */ | ||
779 | if (!list_empty(&q->retransmit)) { | ||
780 | if (transport == asoc->peer.retran_path) | ||
781 | goto retran; | ||
782 | |||
783 | /* Switch transports & prepare the packet. */ | ||
784 | |||
785 | transport = asoc->peer.retran_path; | ||
786 | |||
787 | if (list_empty(&transport->send_ready)) { | ||
788 | list_add_tail(&transport->send_ready, | ||
789 | &transport_list); | ||
790 | } | ||
791 | |||
792 | packet = &transport->packet; | ||
793 | sctp_packet_config(packet, vtag, | ||
794 | asoc->peer.ecn_capable); | ||
795 | retran: | ||
796 | error = sctp_outq_flush_rtx(q, packet, | ||
797 | rtx_timeout, &start_timer); | ||
798 | |||
799 | if (start_timer) | ||
800 | sctp_transport_reset_timers(transport); | ||
801 | |||
802 | /* This can happen on COOKIE-ECHO resend. Only | ||
803 | * one chunk can get bundled with a COOKIE-ECHO. | ||
804 | */ | ||
805 | if (packet->has_cookie_echo) | ||
806 | goto sctp_flush_out; | ||
807 | |||
808 | /* Don't send new data if there is still data | ||
809 | * waiting to retransmit. | ||
810 | */ | ||
811 | if (!list_empty(&q->retransmit)) | ||
812 | goto sctp_flush_out; | ||
813 | } | ||
814 | |||
815 | /* Finally, transmit new packets. */ | ||
816 | start_timer = 0; | ||
817 | queue = &q->out; | ||
818 | |||
819 | while ((chunk = sctp_outq_dequeue_data(q)) != NULL) { | ||
820 | /* RFC 2960 6.5 Every DATA chunk MUST carry a valid | ||
821 | * stream identifier. | ||
822 | */ | ||
823 | if (chunk->sinfo.sinfo_stream >= | ||
824 | asoc->c.sinit_num_ostreams) { | ||
825 | |||
826 | /* Mark as failed send. */ | ||
827 | sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM); | ||
828 | sctp_chunk_free(chunk); | ||
829 | continue; | ||
830 | } | ||
831 | |||
832 | /* Has this chunk expired? */ | ||
833 | if (sctp_chunk_abandoned(chunk)) { | ||
834 | sctp_chunk_fail(chunk, 0); | ||
835 | sctp_chunk_free(chunk); | ||
836 | continue; | ||
837 | } | ||
838 | |||
839 | /* If there is a specified transport, use it. | ||
840 | * Otherwise, we want to use the active path. | ||
841 | */ | ||
842 | new_transport = chunk->transport; | ||
843 | if (!new_transport || !new_transport->active) | ||
844 | new_transport = asoc->peer.active_path; | ||
845 | |||
846 | /* Change packets if necessary. */ | ||
847 | if (new_transport != transport) { | ||
848 | transport = new_transport; | ||
849 | |||
850 | /* Schedule to have this transport's | ||
851 | * packet flushed. | ||
852 | */ | ||
853 | if (list_empty(&transport->send_ready)) { | ||
854 | list_add_tail(&transport->send_ready, | ||
855 | &transport_list); | ||
856 | } | ||
857 | |||
858 | packet = &transport->packet; | ||
859 | sctp_packet_config(packet, vtag, | ||
860 | asoc->peer.ecn_capable); | ||
861 | } | ||
862 | |||
863 | SCTP_DEBUG_PRINTK("sctp_outq_flush(%p, %p[%s]), ", | ||
864 | q, chunk, | ||
865 | chunk && chunk->chunk_hdr ? | ||
866 | sctp_cname(SCTP_ST_CHUNK( | ||
867 | chunk->chunk_hdr->type)) | ||
868 | : "Illegal Chunk"); | ||
869 | |||
870 | SCTP_DEBUG_PRINTK("TX TSN 0x%x skb->head " | ||
871 | "%p skb->users %d.\n", | ||
872 | ntohl(chunk->subh.data_hdr->tsn), | ||
873 | chunk->skb ? chunk->skb->head : NULL, | ||
874 | chunk->skb ? | ||
875 | atomic_read(&chunk->skb->users) : -1); | ||
876 | |||
877 | /* Add the chunk to the packet. */ | ||
878 | status = sctp_packet_transmit_chunk(packet, chunk); | ||
879 | |||
880 | switch (status) { | ||
881 | case SCTP_XMIT_PMTU_FULL: | ||
882 | case SCTP_XMIT_RWND_FULL: | ||
883 | case SCTP_XMIT_NAGLE_DELAY: | ||
884 | /* We could not append this chunk, so put | ||
885 | * the chunk back on the output queue. | ||
886 | */ | ||
887 | SCTP_DEBUG_PRINTK("sctp_outq_flush: could " | ||
888 | "not transmit TSN: 0x%x, status: %d\n", | ||
889 | ntohl(chunk->subh.data_hdr->tsn), | ||
890 | status); | ||
891 | sctp_outq_head_data(q, chunk); | ||
892 | goto sctp_flush_out; | ||
894 | |||
895 | case SCTP_XMIT_OK: | ||
896 | break; | ||
897 | |||
898 | default: | ||
899 | BUG(); | ||
900 | } | ||
901 | |||
902 | /* BUG: We assume that the sctp_packet_transmit() | ||
903 | * call below will succeed all the time and add the | ||
904 | * chunk to the transmitted list and restart the | ||
905 | * timers. | ||
906 | * It is possible that the call can fail under OOM | ||
907 | * conditions. | ||
908 | * | ||
909 | * Is this really a problem? Won't this behave | ||
910 | * like a lost TSN? | ||
911 | */ | ||
912 | list_add_tail(&chunk->transmitted_list, | ||
913 | &transport->transmitted); | ||
914 | |||
915 | sctp_transport_reset_timers(transport); | ||
916 | |||
917 | q->empty = 0; | ||
918 | |||
919 | /* Only let one DATA chunk get bundled with a | ||
920 | * COOKIE-ECHO chunk. | ||
921 | */ | ||
922 | if (packet->has_cookie_echo) | ||
923 | goto sctp_flush_out; | ||
924 | } | ||
925 | break; | ||
926 | |||
927 | default: | ||
928 | /* Do nothing. */ | ||
929 | break; | ||
930 | } | ||
931 | |||
932 | sctp_flush_out: | ||
933 | |||
934 | /* Before returning, examine all the transports touched in | ||
935 | * this call. Right now, we bluntly force clear all the | ||
936 | * transports. Things might change after we implement Nagle. | ||
937 | * But such an examination is still required. | ||
938 | * | ||
939 | * --xguo | ||
940 | */ | ||
941 | while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL ) { | ||
942 | struct sctp_transport *t = list_entry(ltransport, | ||
943 | struct sctp_transport, | ||
944 | send_ready); | ||
945 | packet = &t->packet; | ||
946 | if (!sctp_packet_empty(packet)) | ||
947 | error = sctp_packet_transmit(packet); | ||
948 | } | ||
949 | |||
950 | return error; | ||
951 | } | ||
952 | |||
953 | /* Update unack_data based on the incoming SACK chunk */ | ||
954 | static void sctp_sack_update_unack_data(struct sctp_association *assoc, | ||
955 | struct sctp_sackhdr *sack) | ||
956 | { | ||
957 | sctp_sack_variable_t *frags; | ||
958 | __u16 unack_data; | ||
959 | int i; | ||
960 | |||
961 | unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1; | ||
962 | |||
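| /* next_tsn is the next TSN to be assigned, so the expression | ||
| * above counts every TSN sent beyond the cumulative ack point; | ||
| * the loop below subtracts the ranges covered by Gap Ack Blocks. | ||
| */ | ||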
963 | frags = sack->variable; | ||
964 | for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) { | ||
965 | unack_data -= ((ntohs(frags[i].gab.end) - | ||
966 | ntohs(frags[i].gab.start) + 1)); | ||
967 | } | ||
968 | |||
969 | assoc->unack_data = unack_data; | ||
970 | } | ||
971 | |||
972 | /* Return the highest new tsn that is acknowledged by the given SACK chunk. */ | ||
973 | static __u32 sctp_highest_new_tsn(struct sctp_sackhdr *sack, | ||
974 | struct sctp_association *asoc) | ||
975 | { | ||
976 | struct list_head *ltransport, *lchunk; | ||
977 | struct sctp_transport *transport; | ||
978 | struct sctp_chunk *chunk; | ||
979 | __u32 highest_new_tsn, tsn; | ||
980 | struct list_head *transport_list = &asoc->peer.transport_addr_list; | ||
981 | |||
982 | highest_new_tsn = ntohl(sack->cum_tsn_ack); | ||
983 | |||
984 | list_for_each(ltransport, transport_list) { | ||
985 | transport = list_entry(ltransport, struct sctp_transport, | ||
986 | transports); | ||
987 | list_for_each(lchunk, &transport->transmitted) { | ||
988 | chunk = list_entry(lchunk, struct sctp_chunk, | ||
989 | transmitted_list); | ||
990 | tsn = ntohl(chunk->subh.data_hdr->tsn); | ||
991 | |||
992 | if (!chunk->tsn_gap_acked && | ||
993 | TSN_lt(highest_new_tsn, tsn) && | ||
994 | sctp_acked(sack, tsn)) | ||
995 | highest_new_tsn = tsn; | ||
996 | } | ||
997 | } | ||
998 | |||
999 | return highest_new_tsn; | ||
1000 | } | ||
1001 | |||
1002 | /* This is where we REALLY process a SACK. | ||
1003 | * | ||
1004 | * Process the SACK against the outqueue. Mostly, this just frees | ||
1005 | * things off the transmitted queue. | ||
1006 | */ | ||
1007 | int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack) | ||
1008 | { | ||
1009 | struct sctp_association *asoc = q->asoc; | ||
1010 | struct sctp_transport *transport; | ||
1011 | struct sctp_chunk *tchunk = NULL; | ||
1012 | struct list_head *lchunk, *transport_list, *pos, *temp; | ||
1013 | sctp_sack_variable_t *frags = sack->variable; | ||
1014 | __u32 sack_ctsn, ctsn, tsn; | ||
1015 | __u32 highest_tsn, highest_new_tsn; | ||
1016 | __u32 sack_a_rwnd; | ||
1017 | unsigned outstanding; | ||
1018 | struct sctp_transport *primary = asoc->peer.primary_path; | ||
1019 | int count_of_newacks = 0; | ||
1020 | |||
1021 | /* Grab the association's destination address list. */ | ||
1022 | transport_list = &asoc->peer.transport_addr_list; | ||
1023 | |||
1024 | sack_ctsn = ntohl(sack->cum_tsn_ack); | ||
1025 | |||
1026 | /* | ||
1027 | * SFR-CACC algorithm: | ||
1028 | * On receipt of a SACK the sender SHOULD execute the | ||
1029 | * following statements. | ||
1030 | * | ||
1031 | * 1) If the cumulative ack in the SACK passes next_tsn_at_change | ||
1032 | * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be | ||
1033 | * cleared. The CYCLING_CHANGEOVER flag SHOULD also be cleared for | ||
1034 | * all destinations. | ||
1035 | */ | ||
1036 | if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) { | ||
1037 | primary->cacc.changeover_active = 0; | ||
1038 | list_for_each(pos, transport_list) { | ||
1039 | transport = list_entry(pos, struct sctp_transport, | ||
1040 | transports); | ||
1041 | transport->cacc.cycling_changeover = 0; | ||
1042 | } | ||
1043 | } | ||
1044 | |||
1045 | /* | ||
1046 | * SFR-CACC algorithm: | ||
1047 | * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE | ||
1048 | * is set the receiver of the SACK MUST take the following actions: | ||
1049 | * | ||
1050 | * A) Initialize the cacc_saw_newack to 0 for all destination | ||
1051 | * addresses. | ||
1052 | */ | ||
1053 | if (sack->num_gap_ack_blocks > 0 && | ||
1054 | primary->cacc.changeover_active) { | ||
1055 | list_for_each(pos, transport_list) { | ||
1056 | transport = list_entry(pos, struct sctp_transport, | ||
1057 | transports); | ||
1058 | transport->cacc.cacc_saw_newack = 0; | ||
1059 | } | ||
1060 | } | ||
1061 | |||
1062 | /* Get the highest TSN in the sack. */ | ||
1063 | highest_tsn = sack_ctsn; | ||
1064 | if (sack->num_gap_ack_blocks) | ||
1065 | highest_tsn += | ||
1066 | ntohs(frags[ntohs(sack->num_gap_ack_blocks) - 1].gab.end); | ||
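| /* gab.end is an offset from the Cumulative TSN Ack, so the | ||
| * sum above yields the highest TSN covered by the last block. | ||
| */ | ||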
1067 | |||
1068 | if (TSN_lt(asoc->highest_sacked, highest_tsn)) { | ||
1069 | highest_new_tsn = highest_tsn; | ||
1070 | asoc->highest_sacked = highest_tsn; | ||
1071 | } else { | ||
1072 | highest_new_tsn = sctp_highest_new_tsn(sack, asoc); | ||
1073 | } | ||
1074 | |||
1075 | /* Run through the retransmit queue. Credit bytes received | ||
1076 | * and free those chunks that we can. | ||
1077 | */ | ||
1078 | sctp_check_transmitted(q, &q->retransmit, NULL, sack, highest_new_tsn); | ||
1079 | sctp_mark_missing(q, &q->retransmit, NULL, highest_new_tsn, 0); | ||
1080 | |||
1081 | /* Run through the transmitted queue. | ||
1082 | * Credit bytes received and free those chunks which we can. | ||
1083 | * | ||
1084 | * This is a MASSIVE candidate for optimization. | ||
1085 | */ | ||
1086 | list_for_each(pos, transport_list) { | ||
1087 | transport = list_entry(pos, struct sctp_transport, | ||
1088 | transports); | ||
1089 | sctp_check_transmitted(q, &transport->transmitted, | ||
1090 | transport, sack, highest_new_tsn); | ||
1091 | /* | ||
1092 | * SFR-CACC algorithm: | ||
1093 | * C) Let count_of_newacks be the number of | ||
1094 | * destinations for which cacc_saw_newack is set. | ||
1095 | */ | ||
1096 | if (transport->cacc.cacc_saw_newack) | ||
1097 | count_of_newacks++; | ||
1098 | } | ||
1099 | |||
1100 | list_for_each(pos, transport_list) { | ||
1101 | transport = list_entry(pos, struct sctp_transport, | ||
1102 | transports); | ||
1103 | sctp_mark_missing(q, &transport->transmitted, transport, | ||
1104 | highest_new_tsn, count_of_newacks); | ||
1105 | } | ||
1106 | |||
1107 | /* Move the Cumulative TSN Ack Point if appropriate. */ | ||
1108 | if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) | ||
1109 | asoc->ctsn_ack_point = sack_ctsn; | ||
1110 | |||
1111 | /* Update unack_data field in the assoc. */ | ||
1112 | sctp_sack_update_unack_data(asoc, sack); | ||
1113 | |||
1114 | ctsn = asoc->ctsn_ack_point; | ||
1115 | |||
1116 | /* Throw away stuff rotting on the sack queue. */ | ||
1117 | list_for_each_safe(lchunk, temp, &q->sacked) { | ||
1118 | tchunk = list_entry(lchunk, struct sctp_chunk, | ||
1119 | transmitted_list); | ||
1120 | tsn = ntohl(tchunk->subh.data_hdr->tsn); | ||
1121 | if (TSN_lte(tsn, ctsn)) | ||
1122 | sctp_chunk_free(tchunk); | ||
1123 | } | ||
1124 | |||
1125 | /* ii) Set rwnd equal to the newly received a_rwnd minus the | ||
1126 | * number of bytes still outstanding after processing the | ||
1127 | * Cumulative TSN Ack and the Gap Ack Blocks. | ||
1128 | */ | ||
1129 | |||
1130 | sack_a_rwnd = ntohl(sack->a_rwnd); | ||
1131 | outstanding = q->outstanding_bytes; | ||
1132 | |||
1133 | if (outstanding < sack_a_rwnd) | ||
1134 | sack_a_rwnd -= outstanding; | ||
1135 | else | ||
1136 | sack_a_rwnd = 0; | ||
1137 | |||
1138 | asoc->peer.rwnd = sack_a_rwnd; | ||
1139 | |||
1140 | sctp_generate_fwdtsn(q, sack_ctsn); | ||
1141 | |||
1142 | SCTP_DEBUG_PRINTK("%s: sack Cumulative TSN Ack is 0x%x.\n", | ||
1143 | __FUNCTION__, sack_ctsn); | ||
1144 | SCTP_DEBUG_PRINTK("%s: Cumulative TSN Ack of association, " | ||
1145 | "%p is 0x%x. Adv peer ack point: 0x%x\n", | ||
1146 | __FUNCTION__, asoc, ctsn, asoc->adv_peer_ack_point); | ||
1147 | |||
1148 | /* See if all chunks are acked. | ||
1149 | * Make sure the empty queue handler will get run later. | ||
1150 | */ | ||
1151 | q->empty = skb_queue_empty(&q->out) && skb_queue_empty(&q->control) && | ||
1152 | list_empty(&q->retransmit); | ||
1153 | if (!q->empty) | ||
1154 | goto finish; | ||
1155 | |||
1156 | list_for_each(pos, transport_list) { | ||
1157 | transport = list_entry(pos, struct sctp_transport, | ||
1158 | transports); | ||
1159 | q->empty = q->empty && list_empty(&transport->transmitted); | ||
1160 | if (!q->empty) | ||
1161 | goto finish; | ||
1162 | } | ||
1163 | |||
1164 | SCTP_DEBUG_PRINTK("sack queue is empty.\n"); | ||
1165 | finish: | ||
1166 | return q->empty; | ||
1167 | } | ||
1168 | |||
1169 | /* Is the outqueue empty? */ | ||
1170 | int sctp_outq_is_empty(const struct sctp_outq *q) | ||
1171 | { | ||
1172 | return q->empty; | ||
1173 | } | ||
1174 | |||
1175 | /******************************************************************** | ||
1176 | * 2nd Level Abstractions | ||
1177 | ********************************************************************/ | ||
1178 | |||
1179 | /* Go through a transport's transmitted list or the association's retransmit | ||
1180 | * list and move chunks that are acked by the Cumulative TSN Ack to q->sacked. | ||
1181 | * The retransmit list will not have an associated transport. | ||
1182 | * | ||
1183 | * I added coherent debug information output. --xguo | ||
1184 | * | ||
1185 | * Instead of printing 'sacked' or 'kept' for each TSN on the | ||
1186 | * transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5. | ||
1187 | * KEPT TSN6-TSN7, etc. | ||
1188 | */ | ||
1189 | static void sctp_check_transmitted(struct sctp_outq *q, | ||
1190 | struct list_head *transmitted_queue, | ||
1191 | struct sctp_transport *transport, | ||
1192 | struct sctp_sackhdr *sack, | ||
1193 | __u32 highest_new_tsn_in_sack) | ||
1194 | { | ||
1195 | struct list_head *lchunk; | ||
1196 | struct sctp_chunk *tchunk; | ||
1197 | struct list_head tlist; | ||
1198 | __u32 tsn; | ||
1199 | __u32 sack_ctsn; | ||
1200 | __u32 rtt; | ||
1201 | __u8 restart_timer = 0; | ||
1202 | int bytes_acked = 0; | ||
1203 | |||
1204 | /* These state variables are for coherent debug output. --xguo */ | ||
1205 | |||
1206 | #if SCTP_DEBUG | ||
1207 | __u32 dbg_ack_tsn = 0; /* An ACKed TSN range starts here... */ | ||
1208 | __u32 dbg_last_ack_tsn = 0; /* ...and finishes here. */ | ||
1209 | __u32 dbg_kept_tsn = 0; /* An un-ACKed range starts here... */ | ||
1210 | __u32 dbg_last_kept_tsn = 0; /* ...and finishes here. */ | ||
1211 | |||
1212 | /* 0 : The last TSN was ACKed. | ||
1213 | * 1 : The last TSN was NOT ACKed (i.e. KEPT). | ||
1214 | * -1: We need to initialize. | ||
1215 | */ | ||
1216 | int dbg_prt_state = -1; | ||
1217 | #endif /* SCTP_DEBUG */ | ||
1218 | |||
1219 | sack_ctsn = ntohl(sack->cum_tsn_ack); | ||
1220 | |||
1221 | INIT_LIST_HEAD(&tlist); | ||
1222 | |||
1223 | /* The while loop will skip empty transmitted queues. */ | ||
1224 | while (NULL != (lchunk = sctp_list_dequeue(transmitted_queue))) { | ||
1225 | tchunk = list_entry(lchunk, struct sctp_chunk, | ||
1226 | transmitted_list); | ||
1227 | |||
1228 | if (sctp_chunk_abandoned(tchunk)) { | ||
1229 | /* Move the chunk to abandoned list. */ | ||
1230 | sctp_insert_list(&q->abandoned, lchunk); | ||
1231 | continue; | ||
1232 | } | ||
1233 | |||
1234 | tsn = ntohl(tchunk->subh.data_hdr->tsn); | ||
1235 | if (sctp_acked(sack, tsn)) { | ||
1236 | /* If this queue is the retransmit queue, the | ||
1237 | * retransmit timer has already reclaimed | ||
1238 | * the outstanding bytes for this chunk, so only | ||
1239 | * count bytes associated with a transport. | ||
1240 | */ | ||
1241 | if (transport) { | ||
1242 | /* If this chunk is being used for RTT | ||
1243 | * measurement, calculate the RTT and update | ||
1244 | * the RTO using this value. | ||
1245 | * | ||
1246 | * 6.3.1 C5) Karn's algorithm: RTT measurements | ||
1247 | * MUST NOT be made using packets that were | ||
1248 | * retransmitted (and thus for which it is | ||
1249 | * ambiguous whether the reply was for the | ||
1250 | * first instance of the packet or a later | ||
1251 | * instance). | ||
1252 | */ | ||
1253 | if (!tchunk->tsn_gap_acked && | ||
1254 | !tchunk->resent && | ||
1255 | tchunk->rtt_in_progress) { | ||
1256 | rtt = jiffies - tchunk->sent_at; | ||
1257 | sctp_transport_update_rto(transport, | ||
1258 | rtt); | ||
1259 | } | ||
1260 | } | ||
1261 | if (TSN_lte(tsn, sack_ctsn)) { | ||
1262 | /* RFC 2960 6.3.2 Retransmission Timer Rules | ||
1263 | * | ||
1264 | * R3) Whenever a SACK is received | ||
1265 | * that acknowledges the DATA chunk | ||
1266 | * with the earliest outstanding TSN | ||
1267 | * for that address, restart T3-rtx | ||
1268 | * timer for that address with its | ||
1269 | * current RTO. | ||
1270 | */ | ||
1271 | restart_timer = 1; | ||
1272 | |||
1273 | if (!tchunk->tsn_gap_acked) { | ||
1274 | tchunk->tsn_gap_acked = 1; | ||
1275 | bytes_acked += sctp_data_size(tchunk); | ||
1276 | /* | ||
1277 | * SFR-CACC algorithm: | ||
1278 | * 2) If the SACK contains gap acks | ||
1279 | * and the flag CHANGEOVER_ACTIVE is | ||
1280 | * set the receiver of the SACK MUST | ||
1281 | * take the following action: | ||
1282 | * | ||
1283 | * B) For each TSN t being acked that | ||
1284 | * has not been acked in any SACK so | ||
1285 | * far, set cacc_saw_newack to 1 for | ||
1286 | * the destination that the TSN was | ||
1287 | * sent to. | ||
1288 | */ | ||
1289 | if (transport && | ||
1290 | sack->num_gap_ack_blocks && | ||
1291 | q->asoc->peer.primary_path->cacc. | ||
1292 | changeover_active) | ||
1293 | transport->cacc.cacc_saw_newack | ||
1294 | = 1; | ||
1295 | } | ||
1296 | |||
1297 | list_add_tail(&tchunk->transmitted_list, | ||
1298 | &q->sacked); | ||
1299 | } else { | ||
1300 | /* RFC2960 7.2.4, sctpimpguide-05 2.8.2 | ||
1301 | * M2) Each time a SACK arrives reporting | ||
1302 | * 'Stray DATA chunk(s)' record the highest TSN | ||
1303 | * reported as newly acknowledged, call this | ||
1304 | * value 'HighestTSNinSack'. A newly | ||
1305 | * acknowledged DATA chunk is one not | ||
1306 | * previously acknowledged in a SACK. | ||
1307 | * | ||
1308 | * When the SCTP sender of data receives a SACK | ||
1309 | * chunk that acknowledges, for the first time, | ||
1310 | * the receipt of a DATA chunk, all the still | ||
1311 | * unacknowledged DATA chunks whose TSN is | ||
1312 | * older than that newly acknowledged DATA | ||
1313 | * chunk, are qualified as 'Stray DATA chunks'. | ||
1314 | */ | ||
1315 | if (!tchunk->tsn_gap_acked) { | ||
1316 | tchunk->tsn_gap_acked = 1; | ||
1317 | bytes_acked += sctp_data_size(tchunk); | ||
1318 | } | ||
1319 | list_add_tail(lchunk, &tlist); | ||
1320 | } | ||
1321 | |||
1322 | #if SCTP_DEBUG | ||
1323 | switch (dbg_prt_state) { | ||
1324 | case 0: /* last TSN was ACKed */ | ||
1325 | if (dbg_last_ack_tsn + 1 == tsn) { | ||
1326 | /* This TSN belongs to the | ||
1327 | * current ACK range. | ||
1328 | */ | ||
1329 | break; | ||
1330 | } | ||
1331 | |||
1332 | if (dbg_last_ack_tsn != dbg_ack_tsn) { | ||
1333 | /* Display the end of the | ||
1334 | * current range. | ||
1335 | */ | ||
1336 | SCTP_DEBUG_PRINTK("-%08x", | ||
1337 | dbg_last_ack_tsn); | ||
1338 | } | ||
1339 | |||
1340 | /* Start a new range. */ | ||
1341 | SCTP_DEBUG_PRINTK(",%08x", tsn); | ||
1342 | dbg_ack_tsn = tsn; | ||
1343 | break; | ||
1344 | |||
1345 | case 1: /* The last TSN was NOT ACKed. */ | ||
1346 | if (dbg_last_kept_tsn != dbg_kept_tsn) { | ||
1347 | /* Display the end of current range. */ | ||
1348 | SCTP_DEBUG_PRINTK("-%08x", | ||
1349 | dbg_last_kept_tsn); | ||
1350 | } | ||
1351 | |||
1352 | SCTP_DEBUG_PRINTK("\n"); | ||
1353 | |||
1354 | /* FALL THROUGH... */ | ||
1355 | default: | ||
1356 | /* This is the first-ever TSN we examined. */ | ||
1357 | /* Start a new range of ACK-ed TSNs. */ | ||
1358 | SCTP_DEBUG_PRINTK("ACKed: %08x", tsn); | ||
1359 | dbg_prt_state = 0; | ||
1360 | dbg_ack_tsn = tsn; | ||
1361 | } | ||
1362 | |||
1363 | dbg_last_ack_tsn = tsn; | ||
1364 | #endif /* SCTP_DEBUG */ | ||
1365 | |||
1366 | } else { | ||
1367 | if (tchunk->tsn_gap_acked) { | ||
1368 | SCTP_DEBUG_PRINTK("%s: Receiver reneged on " | ||
1369 | "data TSN: 0x%x\n", | ||
1370 | __FUNCTION__, | ||
1371 | tsn); | ||
1372 | tchunk->tsn_gap_acked = 0; | ||
1373 | |||
1374 | bytes_acked -= sctp_data_size(tchunk); | ||
1375 | |||
1376 | /* RFC 2960 6.3.2 Retransmission Timer Rules | ||
1377 | * | ||
1378 | * R4) Whenever a SACK is received missing a | ||
1379 | * TSN that was previously acknowledged via a | ||
1380 | * Gap Ack Block, start T3-rtx for the | ||
1381 | * destination address to which the DATA | ||
1382 | * chunk was originally | ||
1383 | * transmitted if it is not already running. | ||
1384 | */ | ||
1385 | restart_timer = 1; | ||
1386 | } | ||
1387 | |||
1388 | list_add_tail(lchunk, &tlist); | ||
1389 | |||
1390 | #if SCTP_DEBUG | ||
1391 | /* See the above comments on ACK-ed TSNs. */ | ||
1392 | switch (dbg_prt_state) { | ||
1393 | case 1: | ||
1394 | if (dbg_last_kept_tsn + 1 == tsn) | ||
1395 | break; | ||
1396 | |||
1397 | if (dbg_last_kept_tsn != dbg_kept_tsn) | ||
1398 | SCTP_DEBUG_PRINTK("-%08x", | ||
1399 | dbg_last_kept_tsn); | ||
1400 | |||
1401 | SCTP_DEBUG_PRINTK(",%08x", tsn); | ||
1402 | dbg_kept_tsn = tsn; | ||
1403 | break; | ||
1404 | |||
1405 | case 0: | ||
1406 | if (dbg_last_ack_tsn != dbg_ack_tsn) | ||
1407 | SCTP_DEBUG_PRINTK("-%08x", | ||
1408 | dbg_last_ack_tsn); | ||
1409 | SCTP_DEBUG_PRINTK("\n"); | ||
1410 | |||
1411 | /* FALL THROUGH... */ | ||
1412 | default: | ||
1413 | SCTP_DEBUG_PRINTK("KEPT: %08x",tsn); | ||
1414 | dbg_prt_state = 1; | ||
1415 | dbg_kept_tsn = tsn; | ||
1416 | } | ||
1417 | |||
1418 | dbg_last_kept_tsn = tsn; | ||
1419 | #endif /* SCTP_DEBUG */ | ||
1420 | } | ||
1421 | } | ||
1422 | |||
1423 | #if SCTP_DEBUG | ||
1424 | /* Finish off the last range, displaying its ending TSN. */ | ||
1425 | switch (dbg_prt_state) { | ||
1426 | case 0: | ||
1427 | if (dbg_last_ack_tsn != dbg_ack_tsn) { | ||
1428 | SCTP_DEBUG_PRINTK("-%08x\n", dbg_last_ack_tsn); | ||
1429 | } else { | ||
1430 | SCTP_DEBUG_PRINTK("\n"); | ||
1431 | } | ||
1432 | break; | ||
1433 | |||
1434 | case 1: | ||
1435 | if (dbg_last_kept_tsn != dbg_kept_tsn) { | ||
1436 | SCTP_DEBUG_PRINTK("-%08x\n", dbg_last_kept_tsn); | ||
1437 | } else { | ||
1438 | SCTP_DEBUG_PRINTK("\n"); | ||
1439 | } | ||
1440 | } | ||
1441 | #endif /* SCTP_DEBUG */ | ||
1442 | if (transport) { | ||
1443 | if (bytes_acked) { | ||
1444 | /* 8.2. When an outstanding TSN is acknowledged, | ||
1445 | * the endpoint shall clear the error counter of | ||
1446 | * the destination transport address to which the | ||
1447 | * DATA chunk was last sent. | ||
1448 | * The association's overall error counter is | ||
1449 | * also cleared. | ||
1450 | */ | ||
1451 | transport->error_count = 0; | ||
1452 | transport->asoc->overall_error_count = 0; | ||
1453 | |||
1454 | /* Mark the destination transport address as | ||
1455 | * active if it is not so marked. | ||
1456 | */ | ||
1457 | if (!transport->active) { | ||
1458 | sctp_assoc_control_transport( | ||
1459 | transport->asoc, | ||
1460 | transport, | ||
1461 | SCTP_TRANSPORT_UP, | ||
1462 | SCTP_RECEIVED_SACK); | ||
1463 | } | ||
1464 | |||
1465 | sctp_transport_raise_cwnd(transport, sack_ctsn, | ||
1466 | bytes_acked); | ||
1467 | |||
1468 | transport->flight_size -= bytes_acked; | ||
1469 | q->outstanding_bytes -= bytes_acked; | ||
1470 | } else { | ||
1471 | /* RFC 2960 6.1, sctpimpguide-06 2.15.2 | ||
1472 | * When a sender is doing zero window probing, it | ||
1473 | * should not timeout the association if it continues | ||
1474 | * to receive new packets from the receiver. The | ||
1475 | * reason is that the receiver MAY keep its window | ||
1476 | * closed for an indefinite time. | ||
1477 | * A sender is doing zero window probing when the | ||
1478 | * receiver's advertised window is zero, and there is | ||
1479 | * only one data chunk in flight to the receiver. | ||
1480 | */ | ||
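| /* sack_ctsn + 2 == next_tsn means exactly one TSN, namely | ||
| * sack_ctsn + 1, is still in flight: the probe itself. | ||
| */ | ||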
1481 | if (!q->asoc->peer.rwnd && | ||
1482 | !list_empty(&tlist) && | ||
1483 | (sack_ctsn+2 == q->asoc->next_tsn)) { | ||
1484 | SCTP_DEBUG_PRINTK("%s: SACK received for zero " | ||
1485 | "window probe: %u\n", | ||
1486 | __FUNCTION__, sack_ctsn); | ||
1487 | q->asoc->overall_error_count = 0; | ||
1488 | transport->error_count = 0; | ||
1489 | } | ||
1490 | } | ||
1491 | |||
1492 | /* RFC 2960 6.3.2 Retransmission Timer Rules | ||
1493 | * | ||
1494 | * R2) Whenever all outstanding data sent to an address have | ||
1495 | * been acknowledged, turn off the T3-rtx timer of that | ||
1496 | * address. | ||
1497 | */ | ||
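| /* A pending T3-rtx timer holds a reference on the transport: | ||
| * drop it only when del_timer() removed a pending timer, and | ||
| * take one when mod_timer() arms a timer that was not running. | ||
| */ | ||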
1498 | if (!transport->flight_size) { | ||
1499 | if (timer_pending(&transport->T3_rtx_timer) && | ||
1500 | del_timer(&transport->T3_rtx_timer)) { | ||
1501 | sctp_transport_put(transport); | ||
1502 | } | ||
1503 | } else if (restart_timer) { | ||
1504 | if (!mod_timer(&transport->T3_rtx_timer, | ||
1505 | jiffies + transport->rto)) | ||
1506 | sctp_transport_hold(transport); | ||
1507 | } | ||
1508 | } | ||
1509 | |||
1510 | list_splice(&tlist, transmitted_queue); | ||
1511 | } | ||
1512 | |||
1513 | /* Mark chunks as missing so that they may subsequently be retransmitted. */ | ||
static void sctp_mark_missing(struct sctp_outq *q,
			      struct list_head *transmitted_queue,
			      struct sctp_transport *transport,
			      __u32 highest_new_tsn_in_sack,
			      int count_of_newacks)
{
	struct sctp_chunk *chunk;
	struct list_head *pos;
	__u32 tsn;
	char do_fast_retransmit = 0;
	struct sctp_transport *primary = q->asoc->peer.primary_path;

	list_for_each(pos, transmitted_queue) {

		chunk = list_entry(pos, struct sctp_chunk, transmitted_list);
		tsn = ntohl(chunk->subh.data_hdr->tsn);

		/* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all
		 * 'Unacknowledged TSN's', if the TSN number of an
		 * 'Unacknowledged TSN' is smaller than the 'HighestTSNinSack'
		 * value, increment the 'TSN.Missing.Report' count on that
		 * chunk if it has NOT been fast retransmitted or marked for
		 * fast retransmit already.
		 */
		if (!chunk->fast_retransmit &&
		    !chunk->tsn_gap_acked &&
		    TSN_lt(tsn, highest_new_tsn_in_sack)) {

			/* SFR-CACC may require us to skip marking
			 * this chunk as missing.
			 */
			if (!transport || !sctp_cacc_skip(primary, transport,
					    count_of_newacks, tsn)) {
				chunk->tsn_missing_report++;

				SCTP_DEBUG_PRINTK(
					"%s: TSN 0x%x missing counter: %d\n",
					__FUNCTION__, tsn,
					chunk->tsn_missing_report);
			}
		}
		/* M4) If any DATA chunk is found to have a
		 * 'TSN.Missing.Report' value larger than or equal to 4,
		 * mark that chunk for retransmission and start the fast
		 * retransmit procedure.
		 */
		if (chunk->tsn_missing_report >= 4) {
			chunk->fast_retransmit = 1;
			do_fast_retransmit = 1;
		}
	}

	if (transport) {
		if (do_fast_retransmit)
			sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX);

		SCTP_DEBUG_PRINTK("%s: transport: %p, cwnd: %d, "
				  "ssthresh: %d, flight_size: %d, pba: %d\n",
				  __FUNCTION__, transport, transport->cwnd,
				  transport->ssthresh, transport->flight_size,
				  transport->partial_bytes_acked);
	}
}
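
/* Illustrative walk-through (added, not in the original): with TSNs
 * 10..14 in flight, suppose four successive SACKs newly report 11,
 * then 12, then 13, then 14 in gap ack blocks while TSN 10 remains
 * unacked. Each SACK leaves 10 below the highest newly acked TSN, so
 * sctp_mark_missing() bumps its tsn_missing_report each time; on the
 * fourth report the M4 rule fires, the chunk is marked fast_retransmit,
 * and sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX) runs once for
 * the transport.
 */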

/* Is the given TSN acked by this packet? */
static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
{
	int i;
	sctp_sack_variable_t *frags;
	__u16 gap;
	__u32 ctsn = ntohl(sack->cum_tsn_ack);

	if (TSN_lte(tsn, ctsn))
		goto pass;

	/* 3.3.4 Selective Acknowledgement (SACK) (3):
	 *
	 * Gap Ack Blocks:
	 * These fields contain the Gap Ack Blocks. They are repeated
	 * for each Gap Ack Block up to the number of Gap Ack Blocks
	 * defined in the Number of Gap Ack Blocks field. All DATA
	 * chunks with TSNs greater than or equal to (Cumulative TSN
	 * Ack + Gap Ack Block Start) and less than or equal to
	 * (Cumulative TSN Ack + Gap Ack Block End) of each Gap Ack
	 * Block are assumed to have been received correctly.
	 */

	frags = sack->variable;
	gap = tsn - ctsn;
	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); ++i) {
		if (TSN_lte(ntohs(frags[i].gab.start), gap) &&
		    TSN_lte(gap, ntohs(frags[i].gab.end)))
			goto pass;
	}

	return 0;
pass:
	return 1;
}
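
/* Worked example (added for exposition): with cum_tsn_ack = 100 and a
 * single gap ack block {start = 2, end = 4}, the block covers TSNs
 * 102..104. For tsn = 103 the offset is gap = 103 - 100 = 3, and
 * 2 <= 3 <= 4, so sctp_acked() returns 1; for tsn = 101 (gap = 1,
 * outside every block) it returns 0.
 */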

static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
				    int nskips, __u16 stream)
{
	int i;

	for (i = 0; i < nskips; i++) {
		if (skiplist[i].stream == stream)
			return i;
	}
	return i;
}
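
/* Usage note (added): a return value equal to 'nskips' means 'stream'
 * has no entry yet and the caller may append at that index; any smaller
 * value is the index of the existing entry for 'stream', which the
 * caller overwrites so each stream is reported only once.
 */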

/* Create and add a fwdtsn chunk to the outq's control queue if needed. */
static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
{
	struct sctp_association *asoc = q->asoc;
	struct sctp_chunk *ftsn_chunk = NULL;
	struct sctp_fwdtsn_skip ftsn_skip_arr[10];
	int nskips = 0;
	int skip_pos = 0;
	__u32 tsn;
	struct sctp_chunk *chunk;
	struct list_head *lchunk, *temp;

	/* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the
	 * received SACK.
	 *
	 * If (Advanced.Peer.Ack.Point < SackCumAck), then update
	 * Advanced.Peer.Ack.Point to be equal to SackCumAck.
	 */
	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
		asoc->adv_peer_ack_point = ctsn;

	/* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point"
	 * locally, that is, to move "Advanced.Peer.Ack.Point" up as long as
	 * the chunk next in the out-queue space is marked as "abandoned" as
	 * shown in the following example:
	 *
	 * Assuming that a SACK arrived with the Cumulative TSN ACK 102
	 * and the Advanced.Peer.Ack.Point is updated to this value:
	 *
	 *   out-queue at the end of  ==>   out-queue after Adv.Ack.Point
	 *   normal SACK processing         local advancement
	 *                ...                           ...
	 *   Adv.Ack.Pt-> 102 acked                     102 acked
	 *                103 abandoned                 103 abandoned
	 *                104 abandoned     Adv.Ack.P-> 104 abandoned
	 *                105                           105
	 *                106 acked                     106 acked
	 *                ...                           ...
	 *
	 * In this example, the data sender successfully advanced the
	 * "Advanced.Peer.Ack.Point" from 102 to 104 locally.
	 */
	list_for_each_safe(lchunk, temp, &q->abandoned) {
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		tsn = ntohl(chunk->subh.data_hdr->tsn);

		/* Remove any chunks in the abandoned queue that are acked by
		 * the ctsn.
		 */
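		/* Chunks never acked via a gap ack block are still counted
		 * in flight_size and outstanding_bytes, so give their bytes
		 * back here; gap-acked chunks were already subtracted
		 * during SACK processing.
		 */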
		if (TSN_lte(tsn, ctsn)) {
			list_del_init(lchunk);
			if (!chunk->tsn_gap_acked) {
				chunk->transport->flight_size -=
					sctp_data_size(chunk);
				q->outstanding_bytes -= sctp_data_size(chunk);
			}
			sctp_chunk_free(chunk);
		} else {
			if (TSN_lte(tsn, asoc->adv_peer_ack_point+1)) {
				asoc->adv_peer_ack_point = tsn;
				if (chunk->chunk_hdr->flags &
				    SCTP_DATA_UNORDERED)
					continue;
				skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0],
						nskips,
						chunk->subh.data_hdr->stream);
				ftsn_skip_arr[skip_pos].stream =
					chunk->subh.data_hdr->stream;
				ftsn_skip_arr[skip_pos].ssn =
					chunk->subh.data_hdr->ssn;
				if (skip_pos == nskips)
					nskips++;
				if (nskips == 10)
					break;
			} else
				break;
		}
	}

	/* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point"
	 * is greater than the Cumulative TSN ACK carried in the received
	 * SACK, the data sender MUST send the data receiver a FORWARD TSN
	 * chunk containing the latest value of the
	 * "Advanced.Peer.Ack.Point".
	 *
	 * C4) For each "abandoned" TSN the sender of the FORWARD TSN SHOULD
	 * list each stream and sequence number in the forwarded TSN. This
	 * information will enable the receiver to easily find any
	 * stranded TSN's waiting on stream reorder queues. Each stream
	 * SHOULD only be reported once; this means that if multiple
	 * abandoned messages occur in the same stream then only the
	 * highest abandoned stream sequence number is reported. If the
	 * total size of the FORWARD TSN does NOT fit in a single MTU then
	 * the sender of the FORWARD TSN SHOULD lower the
	 * Advanced.Peer.Ack.Point to the last TSN that will fit in a
	 * single MTU.
	 */
	if (asoc->adv_peer_ack_point > ctsn)
		ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point,
					      nskips, &ftsn_skip_arr[0]);

	if (ftsn_chunk) {
		__skb_queue_tail(&q->control, (struct sk_buff *)ftsn_chunk);
		SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
	}
}
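
/* Illustrative userspace sketch (added for exposition; not kernel code
 * and never compiled, hence the #if 0). It mirrors the skip-list rule
 * used by sctp_generate_fwdtsn() above: each stream appears at most
 * once, and a later abandoned chunk on the same stream overwrites the
 * entry, so only the highest SSN per stream is reported (PR-SCTP C4).
 * All names below (skip_entry, skip_pos) are hypothetical.
 */
#if 0
#include <stdio.h>

struct skip_entry { unsigned short stream, ssn; };

static int skip_pos(const struct skip_entry *list, int n,
		    unsigned short stream)
{
	int i;

	for (i = 0; i < n; i++)
		if (list[i].stream == stream)
			return i;
	return n;			/* append position */
}

int main(void)
{
	struct skip_entry arr[10];
	unsigned short abandoned[][2] = { {1, 5}, {2, 3}, {1, 7} };
	int n = 0, i;

	/* Feed abandoned (stream, ssn) pairs in TSN order. */
	for (i = 0; i < 3; i++) {
		int p = skip_pos(arr, n, abandoned[i][0]);

		arr[p].stream = abandoned[i][0];
		arr[p].ssn = abandoned[i][1];
		if (p == n)		/* new stream: grow the list */
			n++;
	}
	/* Prints: stream 1 ssn 7
	 *         stream 2 ssn 3
	 */
	for (i = 0; i < n; i++)
		printf("stream %hu ssn %hu\n", arr[i].stream, arr[i].ssn);
	return 0;
}
#endif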