Diffstat (limited to 'net/sctp')
-rw-r--r--  net/sctp/associola.c     |  93
-rw-r--r--  net/sctp/bind_addr.c     |  21
-rw-r--r--  net/sctp/chunk.c         |  62
-rw-r--r--  net/sctp/debug.c         |  14
-rw-r--r--  net/sctp/output.c        | 188
-rw-r--r--  net/sctp/outqueue.c      |  47
-rw-r--r--  net/sctp/proc.c          |   4
-rw-r--r--  net/sctp/protocol.c      |  11
-rw-r--r--  net/sctp/sm_make_chunk.c |  25
-rw-r--r--  net/sctp/sm_sideeffect.c |  56
-rw-r--r--  net/sctp/sm_statefuns.c  |  68
-rw-r--r--  net/sctp/socket.c        |  38
-rw-r--r--  net/sctp/sysctl.c        |  12
-rw-r--r--  net/sctp/transport.c     |   3
14 files changed, 467 insertions, 175 deletions
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 525864bf4f07..8450960df24f 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -112,6 +112,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
 asoc->cookie_life.tv_usec = (sp->assocparams.sasoc_cookie_life % 1000)
 * 1000;
 asoc->frag_point = 0;
+asoc->user_frag = sp->user_frag;

 /* Set the association max_retrans and RTO values from the
 * socket values.
@@ -202,6 +203,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
 asoc->a_rwnd = asoc->rwnd;

 asoc->rwnd_over = 0;
+asoc->rwnd_press = 0;

 /* Use my own max window until I learn something better. */
 asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;
@@ -582,6 +584,33 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc,
 asoc->addip_last_asconf->transport == peer)
 asoc->addip_last_asconf->transport = NULL;

+/* If we have something on the transmitted list, we have to
+ * save it off. The best place is the active path.
+ */
+if (!list_empty(&peer->transmitted)) {
+struct sctp_transport *active = asoc->peer.active_path;
+struct sctp_chunk *ch;
+
+/* Reset the transport of each chunk on this list */
+list_for_each_entry(ch, &peer->transmitted,
+transmitted_list) {
+ch->transport = NULL;
+ch->rtt_in_progress = 0;
+}
+
+list_splice_tail_init(&peer->transmitted,
+&active->transmitted);
+
+/* Start a T3 timer here in case it wasn't running so
+ * that these migrated packets have a chance to get
+ * retrnasmitted.
+ */
+if (!timer_pending(&active->T3_rtx_timer))
+if (!mod_timer(&active->T3_rtx_timer,
+jiffies + active->rto))
+sctp_transport_hold(active);
+}
+
 asoc->peer.transport_count--;

 sctp_transport_free(peer);
@@ -651,13 +680,15 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
 */
 peer->param_flags = asoc->param_flags;

+sctp_transport_route(peer, NULL, sp);
+
 /* Initialize the pmtu of the transport. */
-if (peer->param_flags & SPP_PMTUD_ENABLE)
-sctp_transport_pmtu(peer);
-else if (asoc->pathmtu)
-peer->pathmtu = asoc->pathmtu;
-else
-peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
+if (peer->param_flags & SPP_PMTUD_DISABLE) {
+if (asoc->pathmtu)
+peer->pathmtu = asoc->pathmtu;
+else
+peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
+}

 /* If this is the first transport addr on this association,
 * initialize the association PMTU to the peer's PMTU.
@@ -673,7 +704,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
 "%d\n", asoc, asoc->pathmtu);
 peer->pmtu_pending = 0;

-asoc->frag_point = sctp_frag_point(sp, asoc->pathmtu);
+asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);

 /* The asoc->peer.port might not be meaningful yet, but
 * initialize the packet structure anyway.
@@ -810,11 +841,16 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
 break;

 case SCTP_TRANSPORT_DOWN:
-/* if the transort was never confirmed, do not transition it
- * to inactive state.
+/* If the transport was never confirmed, do not transition it
+ * to inactive state. Also, release the cached route since
+ * there may be a better route next time.
 */
 if (transport->state != SCTP_UNCONFIRMED)
 transport->state = SCTP_INACTIVE;
+else {
+dst_release(transport->dst);
+transport->dst = NULL;
+}

 spc_state = SCTP_ADDR_UNREACHABLE;
 break;
@@ -1324,9 +1360,8 @@ void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
 }

 if (pmtu) {
-struct sctp_sock *sp = sctp_sk(asoc->base.sk);
 asoc->pathmtu = pmtu;
-asoc->frag_point = sctp_frag_point(sp, pmtu);
+asoc->frag_point = sctp_frag_point(asoc, pmtu);
 }

 SCTP_DEBUG_PRINTK("%s: asoc:%p, pmtu:%d, frag_point:%d\n",
@@ -1369,6 +1404,17 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned len)
 asoc->rwnd += len;
 }

+/* If we had window pressure, start recovering it
+ * once our rwnd had reached the accumulated pressure
+ * threshold. The idea is to recover slowly, but up
+ * to the initial advertised window.
+ */
+if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
+int change = min(asoc->pathmtu, asoc->rwnd_press);
+asoc->rwnd += change;
+asoc->rwnd_press -= change;
+}
+
 SCTP_DEBUG_PRINTK("%s: asoc %p rwnd increased by %d to (%u, %u) "
 "- %u\n", __func__, asoc, len, asoc->rwnd,
 asoc->rwnd_over, asoc->a_rwnd);
@@ -1401,17 +1447,38 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned len)
 /* Decrease asoc's rwnd by len. */
 void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len)
 {
+int rx_count;
+int over = 0;
+
 SCTP_ASSERT(asoc->rwnd, "rwnd zero", return);
 SCTP_ASSERT(!asoc->rwnd_over, "rwnd_over not zero", return);
+
+if (asoc->ep->rcvbuf_policy)
+rx_count = atomic_read(&asoc->rmem_alloc);
+else
+rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
+
+/* If we've reached or overflowed our receive buffer, announce
+ * a 0 rwnd if rwnd would still be positive. Store the
+ * the pottential pressure overflow so that the window can be restored
+ * back to original value.
+ */
+if (rx_count >= asoc->base.sk->sk_rcvbuf)
+over = 1;
+
 if (asoc->rwnd >= len) {
 asoc->rwnd -= len;
+if (over) {
+asoc->rwnd_press = asoc->rwnd;
+asoc->rwnd = 0;
+}
 } else {
 asoc->rwnd_over = len - asoc->rwnd;
 asoc->rwnd = 0;
 }
-SCTP_DEBUG_PRINTK("%s: asoc %p rwnd decreased by %d to (%u, %u)\n",
+SCTP_DEBUG_PRINTK("%s: asoc %p rwnd decreased by %d to (%u, %u, %u)\n",
 __func__, asoc, len, asoc->rwnd,
-asoc->rwnd_over);
+asoc->rwnd_over, asoc->rwnd_press);
 }

 /* Build the bind address list for the association based on info from the
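The receive-window pressure handling added to associola.c above can be traced with plain numbers. The sketch below is an editor's illustration, not part of the commit: simple integers stand in for the association fields and the decrease path is trimmed to the branch the patch changes. It shows the leftover window being parked in rwnd_press when the receive buffer is full, then being paid back at most one path MTU per later increase.

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

static unsigned int rwnd = 5000;          /* current advertised window      */
static unsigned int rwnd_press;           /* window withheld under pressure */
static const unsigned int pathmtu = 1500;

/* Mirrors the changed branch of sctp_assoc_rwnd_decrease(): with the
 * receive buffer full ("over"), whatever window remains is parked in
 * rwnd_press and a zero window is advertised.
 */
static void rwnd_decrease(unsigned int len, int over)
{
	if (rwnd >= len) {
		rwnd -= len;
		if (over) {
			rwnd_press = rwnd;
			rwnd = 0;
		}
	} else {
		rwnd = 0;
	}
}

/* Mirrors sctp_assoc_rwnd_increase(): once rwnd has caught up to the
 * parked amount, recover at most one path MTU of it per call.
 */
static void rwnd_increase(unsigned int len)
{
	rwnd += len;
	if (rwnd_press && rwnd >= rwnd_press) {
		unsigned int change = MIN(pathmtu, rwnd_press);

		rwnd += change;
		rwnd_press -= change;
	}
}

int main(void)
{
	rwnd_decrease(1000, 1);   /* buffer full: 4000 bytes get parked */
	printf("rwnd=%u press=%u\n", rwnd, rwnd_press);   /* 0, 4000    */
	rwnd_increase(4096);      /* reader drained 4096 bytes          */
	printf("rwnd=%u press=%u\n", rwnd, rwnd_press);   /* 5596, 2500 */
	return 0;
}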
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 6d5944a745d4..13a6fba41077 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -510,9 +510,28 @@ int sctp_in_scope(const union sctp_addr *addr, sctp_scope_t scope)
 * of requested destination address, sender and receiver
 * SHOULD include all of its addresses with level greater
 * than or equal to L.
+ *
+ * Address scoping can be selectively controlled via sysctl
+ * option
 */
-if (addr_scope <= scope)
+switch (sctp_scope_policy) {
+case SCTP_SCOPE_POLICY_DISABLE:
 return 1;
+case SCTP_SCOPE_POLICY_ENABLE:
+if (addr_scope <= scope)
+return 1;
+break;
+case SCTP_SCOPE_POLICY_PRIVATE:
+if (addr_scope <= scope || SCTP_SCOPE_PRIVATE == addr_scope)
+return 1;
+break;
+case SCTP_SCOPE_POLICY_LINK:
+if (addr_scope <= scope || SCTP_SCOPE_LINK == addr_scope)
+return 1;
+break;
+default:
+break;
+}

 return 0;
 }
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 1748ef90950c..acf7c4d128f7 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -59,6 +59,7 @@ static void sctp_datamsg_init(struct sctp_datamsg *msg)
 msg->can_abandon = 0;
 msg->expires_at = 0;
 INIT_LIST_HEAD(&msg->chunks);
+msg->msg_size = 0;
 }

 /* Allocate and initialize datamsg. */
@@ -73,6 +74,19 @@ SCTP_STATIC struct sctp_datamsg *sctp_datamsg_new(gfp_t gfp)
 return msg;
 }

+void sctp_datamsg_free(struct sctp_datamsg *msg)
+{
+struct sctp_chunk *chunk;
+
+/* This doesn't have to be a _safe vairant because
+ * sctp_chunk_free() only drops the refs.
+ */
+list_for_each_entry(chunk, &msg->chunks, frag_list)
+sctp_chunk_free(chunk);
+
+sctp_datamsg_put(msg);
+}
+
 /* Final destructruction of datamsg memory. */
 static void sctp_datamsg_destroy(struct sctp_datamsg *msg)
 {
@@ -142,6 +156,7 @@ static void sctp_datamsg_assign(struct sctp_datamsg *msg, struct sctp_chunk *chu
 {
 sctp_datamsg_hold(msg);
 chunk->msg = msg;
+msg->msg_size += chunk->skb->len;
 }


@@ -158,6 +173,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
 {
 int max, whole, i, offset, over, err;
 int len, first_len;
+int max_data;
 struct sctp_chunk *chunk;
 struct sctp_datamsg *msg;
 struct list_head *pos, *temp;
@@ -179,8 +195,14 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
 __func__, msg, msg->expires_at, jiffies);
 }

-max = asoc->frag_point;
+/* This is the biggest possible DATA chunk that can fit into
+ * the packet
+ */
+max_data = asoc->pathmtu -
+sctp_sk(asoc->base.sk)->pf->af->net_header_len -
+sizeof(struct sctphdr) - sizeof(struct sctp_data_chunk);

+max = asoc->frag_point;
 /* If the the peer requested that we authenticate DATA chunks
 * we need to accound for bundling of the AUTH chunks along with
 * DATA.
@@ -189,23 +211,41 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
 struct sctp_hmac *hmac_desc = sctp_auth_asoc_get_hmac(asoc);

 if (hmac_desc)
-max -= WORD_ROUND(sizeof(sctp_auth_chunk_t) +
+max_data -= WORD_ROUND(sizeof(sctp_auth_chunk_t) +
 hmac_desc->hmac_len);
 }

+/* Now, check if we need to reduce our max */
+if (max > max_data)
+max = max_data;
+
 whole = 0;
 first_len = max;

+/* Check to see if we have a pending SACK and try to let it be bundled
+ * with this message. Do this if we don't have any data queued already.
+ * To check that, look at out_qlen and retransmit list.
+ * NOTE: we will not reduce to account for SACK, if the message would
+ * not have been fragmented.
+ */
+if (timer_pending(&asoc->timers[SCTP_EVENT_TIMEOUT_SACK]) &&
+asoc->outqueue.out_qlen == 0 &&
+list_empty(&asoc->outqueue.retransmit) &&
+msg_len > max)
+max_data -= WORD_ROUND(sizeof(sctp_sack_chunk_t));
+
 /* Encourage Cookie-ECHO bundling. */
-if (asoc->state < SCTP_STATE_COOKIE_ECHOED) {
-whole = msg_len / (max - SCTP_ARBITRARY_COOKIE_ECHO_LEN);
+if (asoc->state < SCTP_STATE_COOKIE_ECHOED)
+max_data -= SCTP_ARBITRARY_COOKIE_ECHO_LEN;

-/* Account for the DATA to be bundled with the COOKIE-ECHO. */
-if (whole) {
-first_len = max - SCTP_ARBITRARY_COOKIE_ECHO_LEN;
-msg_len -= first_len;
-whole = 1;
-}
+/* Now that we adjusted completely, reset first_len */
+if (first_len > max_data)
+first_len = max_data;
+
+/* Account for a different sized first fragment */
+if (msg_len >= first_len) {
+msg_len -= first_len;
+whole = 1;
 }

 /* How many full sized? How many bytes leftover? */
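The max_data computation added to sctp_datamsg_from_user() above bounds each fragment by what one packet can actually carry instead of relying on frag_point alone. A rough worked example, assuming IPv4 and HMAC-SHA1; the header sizes used below are the usual on-the-wire values (the kernel takes them from its own structure definitions), so treat the figures as illustrative:

#include <stdio.h>

#define WORD_ROUND(s) (((s) + 3) & ~3U)

int main(void)
{
	unsigned int pathmtu = 1500;
	unsigned int ip_hdr = 20;      /* IPv4 header                   */
	unsigned int sctp_hdr = 12;    /* SCTP common header            */
	unsigned int data_hdr = 16;    /* DATA chunk header             */
	unsigned int auth_chunk = 8;   /* AUTH chunk header             */
	unsigned int hmac_sha1 = 20;   /* HMAC-SHA1 digest              */
	unsigned int sack_chunk = 16;  /* SACK chunk, no gap ack blocks */
	unsigned int max_data;

	max_data = pathmtu - ip_hdr - sctp_hdr - data_hdr;
	printf("plain DATA payload per packet: %u\n", max_data);   /* 1452 */

	max_data -= WORD_ROUND(auth_chunk + hmac_sha1);
	printf("with AUTH bundled:             %u\n", max_data);   /* 1424 */

	max_data -= WORD_ROUND(sack_chunk);
	printf("with a pending SACK bundled:   %u\n", max_data);   /* 1408 */
	return 0;
}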
diff --git a/net/sctp/debug.c b/net/sctp/debug.c
index 7ff548a30cfb..bf24fa697de2 100644
--- a/net/sctp/debug.c
+++ b/net/sctp/debug.c
@@ -52,7 +52,7 @@ int sctp_debug_flag = 1; /* Initially enable DEBUG */
 #endif /* SCTP_DEBUG */

 /* These are printable forms of Chunk ID's from section 3.1. */
-static const char *sctp_cid_tbl[SCTP_NUM_BASE_CHUNK_TYPES] = {
+static const char *const sctp_cid_tbl[SCTP_NUM_BASE_CHUNK_TYPES] = {
 "DATA",
 "INIT",
 "INIT_ACK",
@@ -97,7 +97,7 @@ const char *sctp_cname(const sctp_subtype_t cid)
 }

 /* These are printable forms of the states. */
-const char *sctp_state_tbl[SCTP_STATE_NUM_STATES] = {
+const char *const sctp_state_tbl[SCTP_STATE_NUM_STATES] = {
 "STATE_EMPTY",
 "STATE_CLOSED",
 "STATE_COOKIE_WAIT",
@@ -110,7 +110,7 @@ const char *sctp_state_tbl[SCTP_STATE_NUM_STATES] = {
 };

 /* Events that could change the state of an association. */
-const char *sctp_evttype_tbl[] = {
+const char *const sctp_evttype_tbl[] = {
 "EVENT_T_unknown",
 "EVENT_T_CHUNK",
 "EVENT_T_TIMEOUT",
@@ -119,7 +119,7 @@ const char *sctp_evttype_tbl[] = {
 };

 /* Return value of a state function */
-const char *sctp_status_tbl[] = {
+const char *const sctp_status_tbl[] = {
 "DISPOSITION_DISCARD",
 "DISPOSITION_CONSUME",
 "DISPOSITION_NOMEM",
@@ -132,7 +132,7 @@ const char *sctp_status_tbl[] = {
 };

 /* Printable forms of primitives */
-static const char *sctp_primitive_tbl[SCTP_NUM_PRIMITIVE_TYPES] = {
+static const char *const sctp_primitive_tbl[SCTP_NUM_PRIMITIVE_TYPES] = {
 "PRIMITIVE_ASSOCIATE",
 "PRIMITIVE_SHUTDOWN",
 "PRIMITIVE_ABORT",
@@ -149,7 +149,7 @@ const char *sctp_pname(const sctp_subtype_t id)
 return "unknown_primitive";
 }

-static const char *sctp_other_tbl[] = {
+static const char *const sctp_other_tbl[] = {
 "NO_PENDING_TSN",
 "ICMP_PROTO_UNREACH",
 };
@@ -162,7 +162,7 @@ const char *sctp_oname(const sctp_subtype_t id)
 return "unknown 'other' event";
 }

-static const char *sctp_timer_tbl[] = {
+static const char *const sctp_timer_tbl[] = {
 "TIMEOUT_NONE",
 "TIMEOUT_T1_COOKIE",
 "TIMEOUT_T1_INIT",
diff --git a/net/sctp/output.c b/net/sctp/output.c
index b94c21190566..5cbda8f1ddfd 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -61,8 +61,24 @@
 #include <net/sctp/checksum.h>

 /* Forward declarations for private helpers. */
-static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
+static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
 struct sctp_chunk *chunk);
+static void sctp_packet_append_data(struct sctp_packet *packet,
+struct sctp_chunk *chunk);
+static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
+struct sctp_chunk *chunk,
+u16 chunk_len);
+
+static void sctp_packet_reset(struct sctp_packet *packet)
+{
+packet->size = packet->overhead;
+packet->has_cookie_echo = 0;
+packet->has_sack = 0;
+packet->has_data = 0;
+packet->has_auth = 0;
+packet->ipfragok = 0;
+packet->auth = NULL;
+}

 /* Config a packet.
 * This appears to be a followup set of initializations.
@@ -75,13 +91,8 @@ struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
 SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__,
 packet, vtag);

+sctp_packet_reset(packet);
 packet->vtag = vtag;
-packet->has_cookie_echo = 0;
-packet->has_sack = 0;
-packet->has_auth = 0;
-packet->has_data = 0;
-packet->ipfragok = 0;
-packet->auth = NULL;

 if (ecn_capable && sctp_packet_empty(packet)) {
 chunk = sctp_get_ecne_prepend(packet->transport->asoc);
@@ -119,15 +130,9 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *packet,
 }
 overhead += sizeof(struct sctphdr);
 packet->overhead = overhead;
-packet->size = overhead;
+sctp_packet_reset(packet);
 packet->vtag = 0;
-packet->has_cookie_echo = 0;
-packet->has_sack = 0;
-packet->has_auth = 0;
-packet->has_data = 0;
-packet->ipfragok = 0;
 packet->malloced = 0;
-packet->auth = NULL;
 return packet;
 }

@@ -204,7 +209,7 @@ static sctp_xmit_t sctp_packet_bundle_auth(struct sctp_packet *pkt,
 /* See if this is an auth chunk we are bundling or if
 * auth is already bundled.
 */
-if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->auth)
+if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth)
 return retval;

 /* if the peer did not request this chunk to be authenticated,
@@ -234,18 +239,19 @@ static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt,
 if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
 !pkt->has_cookie_echo) {
 struct sctp_association *asoc;
+struct timer_list *timer;
 asoc = pkt->transport->asoc;
+timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];

-if (asoc->a_rwnd > asoc->rwnd) {
+/* If the SACK timer is running, we have a pending SACK */
+if (timer_pending(timer)) {
 struct sctp_chunk *sack;
 asoc->a_rwnd = asoc->rwnd;
 sack = sctp_make_sack(asoc);
 if (sack) {
-struct timer_list *timer;
 retval = sctp_packet_append_chunk(pkt, sack);
 asoc->peer.sack_needed = 0;
-timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
-if (timer_pending(timer) && del_timer(timer))
+if (del_timer(timer))
 sctp_association_put(asoc);
 }
 }
@@ -261,13 +267,20 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
 {
 sctp_xmit_t retval = SCTP_XMIT_OK;
 __u16 chunk_len = WORD_ROUND(ntohs(chunk->chunk_hdr->length));
-size_t psize;
-size_t pmtu;
-int too_big;

 SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __func__, packet,
 chunk);

+/* Data chunks are special. Before seeing what else we can
+ * bundle into this packet, check to see if we are allowed to
+ * send this DATA.
+ */
+if (sctp_chunk_is_data(chunk)) {
+retval = sctp_packet_can_append_data(packet, chunk);
+if (retval != SCTP_XMIT_OK)
+goto finish;
+}
+
 /* Try to bundle AUTH chunk */
 retval = sctp_packet_bundle_auth(packet, chunk);
 if (retval != SCTP_XMIT_OK)
@@ -278,51 +291,16 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
 if (retval != SCTP_XMIT_OK)
 goto finish;

-psize = packet->size;
-pmtu = ((packet->transport->asoc) ?
-(packet->transport->asoc->pathmtu) :
-(packet->transport->pathmtu));
-
-too_big = (psize + chunk_len > pmtu);
-
-/* Decide if we need to fragment or resubmit later. */
-if (too_big) {
-/* It's OK to fragmet at IP level if any one of the following
- * is true:
- * 1. The packet is empty (meaning this chunk is greater
- * the MTU)
- * 2. The chunk we are adding is a control chunk
- * 3. The packet doesn't have any data in it yet and data
- * requires authentication.
- */
-if (sctp_packet_empty(packet) || !sctp_chunk_is_data(chunk) ||
-(!packet->has_data && chunk->auth)) {
-/* We no longer do re-fragmentation.
- * Just fragment at the IP layer, if we
- * actually hit this condition
- */
-packet->ipfragok = 1;
-goto append;
-
-} else {
-retval = SCTP_XMIT_PMTU_FULL;
-goto finish;
-}
-}
-
-append:
-/* We believe that this chunk is OK to add to the packet (as
- * long as we have the cwnd for it).
- */
+/* Check to see if this chunk will fit into the packet */
+retval = sctp_packet_will_fit(packet, chunk, chunk_len);
+if (retval != SCTP_XMIT_OK)
+goto finish;

-/* DATA is a special case since we must examine both rwnd and cwnd
- * before we send DATA.
- */
+/* We believe that this chunk is OK to add to the packet */
 switch (chunk->chunk_hdr->type) {
 case SCTP_CID_DATA:
-retval = sctp_packet_append_data(packet, chunk);
-if (SCTP_XMIT_OK != retval)
-goto finish;
+/* Account for the data being in the packet */
+sctp_packet_append_data(packet, chunk);
 /* Disallow SACK bundling after DATA. */
 packet->has_sack = 1;
 /* Disallow AUTH bundling after DATA */
@@ -598,7 +576,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
 (*tp->af_specific->sctp_xmit)(nskb, tp);

 out:
-packet->size = packet->overhead;
+sctp_packet_reset(packet);
 return err;
 no_route:
 kfree_skb(nskb);
@@ -632,16 +610,15 @@ nomem:
 * 2nd Level Abstractions
 ********************************************************************/

-/* This private function handles the specifics of appending DATA chunks. */
-static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
+/* This private function check to see if a chunk can be added */
+static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
 struct sctp_chunk *chunk)
 {
 sctp_xmit_t retval = SCTP_XMIT_OK;
-size_t datasize, rwnd, inflight;
+size_t datasize, rwnd, inflight, flight_size;
 struct sctp_transport *transport = packet->transport;
 __u32 max_burst_bytes;
 struct sctp_association *asoc = transport->asoc;
-struct sctp_sock *sp = sctp_sk(asoc->base.sk);
 struct sctp_outq *q = &asoc->outqueue;

 /* RFC 2960 6.1 Transmission of DATA Chunks
@@ -658,7 +635,8 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
 */

 rwnd = asoc->peer.rwnd;
-inflight = asoc->outqueue.outstanding_bytes;
+inflight = q->outstanding_bytes;
+flight_size = transport->flight_size;

 datasize = sctp_data_size(chunk);

@@ -681,8 +659,8 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
 * cwnd = flightsize + Max.Burst * MTU
 */
 max_burst_bytes = asoc->max_burst * asoc->pathmtu;
-if ((transport->flight_size + max_burst_bytes) < transport->cwnd) {
-transport->cwnd = transport->flight_size + max_burst_bytes;
+if ((flight_size + max_burst_bytes) < transport->cwnd) {
+transport->cwnd = flight_size + max_burst_bytes;
 SCTP_DEBUG_PRINTK("%s: cwnd limited by max_burst: "
 "transport: %p, cwnd: %d, "
 "ssthresh: %d, flight_size: %d, "
@@ -707,7 +685,7 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
 * ignore the value of cwnd and SHOULD NOT delay retransmission.
 */
 if (chunk->fast_retransmit != SCTP_NEED_FRTX)
-if (transport->flight_size >= transport->cwnd) {
+if (flight_size >= transport->cwnd) {
 retval = SCTP_XMIT_RWND_FULL;
 goto finish;
 }
@@ -717,20 +695,36 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
 * if any previously transmitted data on the connection remains
 * unacknowledged.
 */
-if (!sp->nodelay && sctp_packet_empty(packet) &&
-q->outstanding_bytes && sctp_state(asoc, ESTABLISHED)) {
-unsigned len = datasize + q->out_qlen;
+if (!sctp_sk(asoc->base.sk)->nodelay && sctp_packet_empty(packet) &&
+inflight && sctp_state(asoc, ESTABLISHED)) {
+unsigned max = transport->pathmtu - packet->overhead;
+unsigned len = chunk->skb->len + q->out_qlen;

 /* Check whether this chunk and all the rest of pending
 * data will fit or delay in hopes of bundling a full
 * sized packet.
+ * Don't delay large message writes that may have been
+ * fragmeneted into small peices.
 */
-if (len < asoc->frag_point) {
+if ((len < max) && (chunk->msg->msg_size < max)) {
 retval = SCTP_XMIT_NAGLE_DELAY;
 goto finish;
 }
 }

+finish:
+return retval;
+}
+
+/* This private function does management things when adding DATA chunk */
+static void sctp_packet_append_data(struct sctp_packet *packet,
+struct sctp_chunk *chunk)
+{
+struct sctp_transport *transport = packet->transport;
+size_t datasize = sctp_data_size(chunk);
+struct sctp_association *asoc = transport->asoc;
+u32 rwnd = asoc->peer.rwnd;
+
 /* Keep track of how many bytes are in flight over this transport. */
 transport->flight_size += datasize;

@@ -753,7 +747,45 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
 /* Has been accepted for transmission. */
 if (!asoc->peer.prsctp_capable)
 chunk->msg->can_abandon = 0;
+}
+
+static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
+struct sctp_chunk *chunk,
+u16 chunk_len)
+{
+size_t psize;
+size_t pmtu;
+int too_big;
+sctp_xmit_t retval = SCTP_XMIT_OK;
+
+psize = packet->size;
+pmtu = ((packet->transport->asoc) ?
+(packet->transport->asoc->pathmtu) :
+(packet->transport->pathmtu));
+
+too_big = (psize + chunk_len > pmtu);
+
+/* Decide if we need to fragment or resubmit later. */
+if (too_big) {
+/* It's OK to fragmet at IP level if any one of the following
+ * is true:
+ * 1. The packet is empty (meaning this chunk is greater
+ * the MTU)
+ * 2. The chunk we are adding is a control chunk
+ * 3. The packet doesn't have any data in it yet and data
+ * requires authentication.
+ */
+if (sctp_packet_empty(packet) || !sctp_chunk_is_data(chunk) ||
+(!packet->has_data && chunk->auth)) {
+/* We no longer do re-fragmentation.
+ * Just fragment at the IP layer, if we
+ * actually hit this condition
+ */
+packet->ipfragok = 1;
+} else {
+retval = SCTP_XMIT_PMTU_FULL;
+}
+}

-finish:
 return retval;
 }
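The Nagle test moved into sctp_packet_can_append_data() above now compares against the room in one packet (pathmtu minus the packet overhead) and also looks at the size of the whole message, so fragments of a large write are never held back. A small sketch of just that predicate; the parameter names are hypothetical stand-ins for the fields the kernel code reads:

#include <stdbool.h>
#include <stdio.h>

static bool nagle_delay(bool nodelay, bool packet_empty, unsigned int inflight,
			bool established, unsigned int pathmtu,
			unsigned int overhead, unsigned int queued_len,
			unsigned int msg_size)
{
	unsigned int max = pathmtu - overhead;   /* data room in one packet */

	if (nodelay || !packet_empty || !inflight || !established)
		return false;
	/* Delay only while both the queued bytes and the whole message
	 * could still be bundled into a single full-sized packet.
	 */
	return queued_len < max && msg_size < max;
}

int main(void)
{
	/* 100-byte write from a small message: delayed to encourage bundling. */
	printf("%d\n", nagle_delay(false, true, 1400, true, 1500, 48, 100, 100));
	/* 100-byte tail fragment of an 8 KiB message: sent right away. */
	printf("%d\n", nagle_delay(false, true, 1400, true, 1500, 48, 100, 8192));
	return 0;
}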
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index d765fc53e74d..c9f20e28521b 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -406,8 +406,9 @@ void sctp_retransmit_mark(struct sctp_outq *q,
 * not be retransmitted
 */
 if (!chunk->tsn_gap_acked) {
-chunk->transport->flight_size -=
-sctp_data_size(chunk);
+if (chunk->transport)
+chunk->transport->flight_size -=
+sctp_data_size(chunk);
 q->outstanding_bytes -= sctp_data_size(chunk);
 q->asoc->peer.rwnd += (sctp_data_size(chunk) +
 sizeof(struct sk_buff));
@@ -443,7 +444,8 @@ void sctp_retransmit_mark(struct sctp_outq *q,
 q->asoc->peer.rwnd += (sctp_data_size(chunk) +
 sizeof(struct sk_buff));
 q->outstanding_bytes -= sctp_data_size(chunk);
-transport->flight_size -= sctp_data_size(chunk);
+if (chunk->transport)
+transport->flight_size -= sctp_data_size(chunk);

 /* sctpimpguide-05 Section 2.8.2
 * M5) If a T3-rtx timer expires, the
@@ -1310,6 +1312,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 __u32 rtt;
 __u8 restart_timer = 0;
 int bytes_acked = 0;
+int migrate_bytes = 0;

 /* These state variables are for coherent debug output. --xguo */

@@ -1343,8 +1346,9 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 * considering it as 'outstanding'.
 */
 if (!tchunk->tsn_gap_acked) {
-tchunk->transport->flight_size -=
-sctp_data_size(tchunk);
+if (tchunk->transport)
+tchunk->transport->flight_size -=
+sctp_data_size(tchunk);
 q->outstanding_bytes -= sctp_data_size(tchunk);
 }
 continue;
@@ -1378,6 +1382,20 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 rtt);
 }
 }
+
+/* If the chunk hasn't been marked as ACKED,
+ * mark it and account bytes_acked if the
+ * chunk had a valid transport (it will not
+ * have a transport if ASCONF had deleted it
+ * while DATA was outstanding).
+ */
+if (!tchunk->tsn_gap_acked) {
+tchunk->tsn_gap_acked = 1;
+bytes_acked += sctp_data_size(tchunk);
+if (!tchunk->transport)
+migrate_bytes += sctp_data_size(tchunk);
+}
+
 if (TSN_lte(tsn, sack_ctsn)) {
 /* RFC 2960 6.3.2 Retransmission Timer Rules
 *
@@ -1391,8 +1409,6 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 restart_timer = 1;

 if (!tchunk->tsn_gap_acked) {
-tchunk->tsn_gap_acked = 1;
-bytes_acked += sctp_data_size(tchunk);
 /*
 * SFR-CACC algorithm:
 * 2) If the SACK contains gap acks
@@ -1432,10 +1448,6 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 * older than that newly acknowledged DATA
 * chunk, are qualified as 'Stray DATA chunks'.
 */
-if (!tchunk->tsn_gap_acked) {
-tchunk->tsn_gap_acked = 1;
-bytes_acked += sctp_data_size(tchunk);
-}
 list_add_tail(lchunk, &tlist);
 }

@@ -1491,7 +1503,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 tsn);
 tchunk->tsn_gap_acked = 0;

-bytes_acked -= sctp_data_size(tchunk);
+if (tchunk->transport)
+bytes_acked -= sctp_data_size(tchunk);

 /* RFC 2960 6.3.2 Retransmission Timer Rules
 *
@@ -1561,6 +1574,14 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 #endif /* SCTP_DEBUG */
 if (transport) {
 if (bytes_acked) {
+/* We may have counted DATA that was migrated
+ * to this transport due to DEL-IP operation.
+ * Subtract those bytes, since the were never
+ * send on this transport and shouldn't be
+ * credited to this transport.
+ */
+bytes_acked -= migrate_bytes;
+
 /* 8.2. When an outstanding TSN is acknowledged,
 * the endpoint shall clear the error counter of
 * the destination transport address to which the
@@ -1589,7 +1610,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 transport->flight_size -= bytes_acked;
 if (transport->flight_size == 0)
 transport->partial_bytes_acked = 0;
-q->outstanding_bytes -= bytes_acked;
+q->outstanding_bytes -= bytes_acked + migrate_bytes;
 } else {
 /* RFC 2960 6.1, sctpimpguide-06 2.15.2
 * When a sender is doing zero window probing, it
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index f268910620be..d093cbfeaac4 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -512,10 +512,8 @@ int __init sctp_remaddr_proc_init(void)
 {
 struct proc_dir_entry *p;

-p = create_proc_entry("remaddr", S_IRUGO, proc_net_sctp);
+p = proc_create("remaddr", S_IRUGO, proc_net_sctp, &sctp_remaddr_seq_fops);
 if (!p)
 return -ENOMEM;
-p->proc_fops = &sctp_remaddr_seq_fops;
-
 return 0;
 }
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index a76da657244a..60093be8385d 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -431,16 +431,14 @@ static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp)
 * of requested destination address, sender and receiver
 * SHOULD include all of its addresses with level greater
 * than or equal to L.
+ *
+ * IPv4 scoping can be controlled through sysctl option
+ * net.sctp.addr_scope_policy
 */
 static sctp_scope_t sctp_v4_scope(union sctp_addr *addr)
 {
 sctp_scope_t retval;

-/* Should IPv4 scoping be a sysctl configurable option
- * so users can turn it off (default on) for certain
- * unconventional networking environments?
- */
-
 /* Check for unusable SCTP addresses. */
 if (IS_IPV4_UNUSABLE_ADDRESS(addr->v4.sin_addr.s_addr)) {
 retval = SCTP_SCOPE_UNUSABLE;
@@ -1259,6 +1257,9 @@ SCTP_STATIC __init int sctp_init(void)
 /* Disable AUTH by default. */
 sctp_auth_enable = 0;

+/* Set SCOPE policy to enabled */
+sctp_scope_policy = SCTP_SCOPE_POLICY_ENABLE;
+
 sctp_sysctl_register();

 INIT_LIST_HEAD(&sctp_address_families);
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 61cc6075b0df..9d881a61ac02 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2861,6 +2861,11 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
 addr_param = (union sctp_addr_param *)
 ((void *)asconf_param + sizeof(sctp_addip_param_t));

+if (asconf_param->param_hdr.type != SCTP_PARAM_ADD_IP &&
+asconf_param->param_hdr.type != SCTP_PARAM_DEL_IP &&
+asconf_param->param_hdr.type != SCTP_PARAM_SET_PRIMARY)
+return SCTP_ERROR_UNKNOWN_PARAM;
+
 switch (addr_param->v4.param_hdr.type) {
 case SCTP_PARAM_IPV6_ADDRESS:
 if (!asoc->peer.ipv6_address)
@@ -2958,9 +2963,6 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,

 sctp_assoc_set_primary(asoc, peer);
 break;
-default:
-return SCTP_ERROR_UNKNOWN_PARAM;
-break;
 }

 return SCTP_ERROR_NO_ERROR;
@@ -3104,7 +3106,7 @@ done:
 }

 /* Process a asconf parameter that is successfully acked. */
-static int sctp_asconf_param_success(struct sctp_association *asoc,
+static void sctp_asconf_param_success(struct sctp_association *asoc,
 sctp_addip_param_t *asconf_param)
 {
 struct sctp_af *af;
@@ -3113,7 +3115,6 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
 union sctp_addr_param *addr_param;
 struct sctp_transport *transport;
 struct sctp_sockaddr_entry *saddr;
-int retval = 0;

 addr_param = (union sctp_addr_param *)
 ((void *)asconf_param + sizeof(sctp_addip_param_t));
@@ -3133,10 +3134,18 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
 saddr->state = SCTP_ADDR_SRC;
 }
 local_bh_enable();
+list_for_each_entry(transport, &asoc->peer.transport_addr_list,
+transports) {
+if (transport->state == SCTP_ACTIVE)
+continue;
+dst_release(transport->dst);
+sctp_transport_route(transport, NULL,
+sctp_sk(asoc->base.sk));
+}
 break;
 case SCTP_PARAM_DEL_IP:
 local_bh_disable();
-retval = sctp_del_bind_addr(bp, &addr);
+sctp_del_bind_addr(bp, &addr);
 local_bh_enable();
 list_for_each_entry(transport, &asoc->peer.transport_addr_list,
 transports) {
@@ -3148,8 +3157,6 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
 default:
 break;
 }
-
-return retval;
 }

 /* Get the corresponding ASCONF response error code from the ASCONF_ACK chunk
@@ -3266,7 +3273,7 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,

 switch (err_code) {
 case SCTP_ERROR_NO_ERROR:
-retval = sctp_asconf_param_success(asoc, asconf_param);
+sctp_asconf_param_success(asoc, asconf_param);
 break;

 case SCTP_ERROR_RSRC_LOW:
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 86426aac1600..8674d4919556 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -440,14 +440,26 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
 /* The check for association's overall error counter exceeding the
 * threshold is done in the state function.
 */
-/* When probing UNCONFIRMED addresses, the association overall
- * error count is NOT incremented
+/* We are here due to a timer expiration. If the timer was
+ * not a HEARTBEAT, then normal error tracking is done.
+ * If the timer was a heartbeat, we only increment error counts
+ * when we already have an outstanding HEARTBEAT that has not
+ * been acknowledged.
+ * Additionaly, some tranport states inhibit error increments.
 */
-if (transport->state != SCTP_UNCONFIRMED)
+if (!is_hb) {
 asoc->overall_error_count++;
+if (transport->state != SCTP_INACTIVE)
+transport->error_count++;
+} else if (transport->hb_sent) {
+if (transport->state != SCTP_UNCONFIRMED)
+asoc->overall_error_count++;
+if (transport->state != SCTP_INACTIVE)
+transport->error_count++;
+}

 if (transport->state != SCTP_INACTIVE &&
-(transport->error_count++ >= transport->pathmaxrxt)) {
+(transport->error_count > transport->pathmaxrxt)) {
 SCTP_DEBUG_PRINTK_IPADDR("transport_strike:association %p",
 " transport IP: port:%d failed.\n",
 asoc,
@@ -931,6 +943,27 @@ static void sctp_cmd_t1_timer_update(struct sctp_association *asoc,

 }

+/* Send the whole message, chunk by chunk, to the outqueue.
+ * This way the whole message is queued up and bundling if
+ * encouraged for small fragments.
+ */
+static int sctp_cmd_send_msg(struct sctp_association *asoc,
+struct sctp_datamsg *msg)
+{
+struct sctp_chunk *chunk;
+int error = 0;
+
+list_for_each_entry(chunk, &msg->chunks, frag_list) {
+error = sctp_outq_tail(&asoc->outqueue, chunk);
+if (error)
+break;
+}
+
+return error;
+}
+
+
+
 /* These three macros allow us to pull the debugging code out of the
 * main flow of sctp_do_sm() to keep attention focused on the real
 * functionality there.
@@ -1500,7 +1533,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 case SCTP_CMD_PROCESS_CTSN:
 /* Dummy up a SACK for processing. */
 sackh.cum_tsn_ack = cmd->obj.be32;
-sackh.a_rwnd = 0;
+sackh.a_rwnd = asoc->peer.rwnd +
+asoc->outqueue.outstanding_bytes;
 sackh.num_gap_ack_blocks = 0;
 sackh.num_dup_tsns = 0;
 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
@@ -1575,7 +1609,13 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 case SCTP_CMD_UPDATE_INITTAG:
 asoc->peer.i.init_tag = cmd->obj.u32;
 break;
-
+case SCTP_CMD_SEND_MSG:
+if (!asoc->outqueue.cork) {
+sctp_outq_cork(&asoc->outqueue);
+local_cork = 1;
+}
+error = sctp_cmd_send_msg(asoc, cmd->obj.msg);
+break;
 default:
 printk(KERN_WARNING "Impossible command: %u, %p\n",
 cmd->verb, cmd->obj.ptr);
@@ -1593,9 +1633,9 @@ out:
 */
 if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) {
 if (chunk->end_of_packet || chunk->singleton)
-sctp_outq_uncork(&asoc->outqueue);
+error = sctp_outq_uncork(&asoc->outqueue);
 } else if (local_cork)
-sctp_outq_uncork(&asoc->outqueue);
+error = sctp_outq_uncork(&asoc->outqueue);
 return error;
 nomem:
 error = -ENOMEM;
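The reworked counting in sctp_do_8_2_transport_strike() above separates ordinary timeouts from heartbeat timeouts and increments the counters before the failure check, which is why the comparison becomes '>' rather than '>='. A compact restatement of those rules, using simplified enum states instead of the real SCTP transport states; this is an illustration only:

#include <stdbool.h>
#include <stdio.h>

enum t_state { T_ACTIVE, T_INACTIVE, T_UNCONFIRMED };

struct counters {
	unsigned int assoc_errors;
	unsigned int transport_errors;
};

static bool transport_failed(struct counters *c, enum t_state state,
			     bool is_hb, bool hb_sent, unsigned int pathmaxrxt)
{
	if (!is_hb) {
		/* Ordinary timer expiration: always strike the association. */
		c->assoc_errors++;
		if (state != T_INACTIVE)
			c->transport_errors++;
	} else if (hb_sent) {
		/* Heartbeat timer: count only if a HEARTBEAT is outstanding,
		 * and never strike the association for unconfirmed paths.
		 */
		if (state != T_UNCONFIRMED)
			c->assoc_errors++;
		if (state != T_INACTIVE)
			c->transport_errors++;
	}

	return state != T_INACTIVE && c->transport_errors > pathmaxrxt;
}

int main(void)
{
	struct counters c = { 0, 0 };
	int i, failed = 0;

	/* With pathmaxrxt = 5, the sixth counted strike marks the path failed. */
	for (i = 0; i < 6 && !failed; i++)
		failed = transport_failed(&c, T_ACTIVE, false, false, 5);
	printf("strikes=%u failed=%d\n", c.transport_errors, failed);
	return 0;
}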
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 7288192f7df5..c8fae1983dd1 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -334,6 +334,15 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep, | |||
334 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t))) | 334 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t))) |
335 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | 335 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); |
336 | 336 | ||
337 | /* If the INIT is coming toward a closing socket, we'll send back | ||
338 | * an ABORT. Essentially, this catches the race of an INIT being | ||
339 | * backlogged to the socket at the same time as the user issues close(). | ||
340 | * Since the socket and all its associations are going away, we | ||
341 | * can treat this as OOTB. | ||
342 | */ | ||
343 | if (sctp_sstate(ep->base.sk, CLOSING)) | ||
344 | return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); | ||
345 | |||
337 | /* Verify the INIT chunk before processing it. */ | 346 | /* Verify the INIT chunk before processing it. */ |
338 | err_chunk = NULL; | 347 | err_chunk = NULL; |
339 | if (!sctp_verify_init(asoc, chunk->chunk_hdr->type, | 348 | if (!sctp_verify_init(asoc, chunk->chunk_hdr->type, |
@@ -962,7 +971,7 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep, | |||
962 | { | 971 | { |
963 | struct sctp_transport *transport = (struct sctp_transport *) arg; | 972 | struct sctp_transport *transport = (struct sctp_transport *) arg; |
964 | 973 | ||
965 | if (asoc->overall_error_count > asoc->max_retrans) { | 974 | if (asoc->overall_error_count >= asoc->max_retrans) { |
966 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | 975 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, |
967 | SCTP_ERROR(ETIMEDOUT)); | 976 | SCTP_ERROR(ETIMEDOUT)); |
968 | /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ | 977 | /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ |
@@ -1106,7 +1115,8 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep, | |||
1106 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | 1115 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); |
1107 | 1116 | ||
1108 | /* Make sure that the HEARTBEAT-ACK chunk has a valid length. */ | 1117 | /* Make sure that the HEARTBEAT-ACK chunk has a valid length. */ |
1109 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_heartbeat_chunk_t))) | 1118 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t) + |
1119 | sizeof(sctp_sender_hb_info_t))) | ||
1110 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | 1120 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, |
1111 | commands); | 1121 | commands); |
1112 | 1122 | ||
@@ -2561,6 +2571,12 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep, | |||
2561 | chunk->subh.shutdown_hdr = sdh; | 2571 | chunk->subh.shutdown_hdr = sdh; |
2562 | ctsn = ntohl(sdh->cum_tsn_ack); | 2572 | ctsn = ntohl(sdh->cum_tsn_ack); |
2563 | 2573 | ||
2574 | if (TSN_lt(ctsn, asoc->ctsn_ack_point)) { | ||
2575 | SCTP_DEBUG_PRINTK("ctsn %x\n", ctsn); | ||
2576 | SCTP_DEBUG_PRINTK("ctsn_ack_point %x\n", asoc->ctsn_ack_point); | ||
2577 | return SCTP_DISPOSITION_DISCARD; | ||
2578 | } | ||
2579 | |||
2564 | /* If Cumulative TSN Ack beyond the max tsn currently | 2580 | /* If Cumulative TSN Ack beyond the max tsn currently |
2565 | * send, terminating the association and respond to the | 2581 | * send, terminating the association and respond to the |
2566 | * sender with an ABORT. | 2582 | * sender with an ABORT. |
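The new guard discards a SHUTDOWN whose Cumulative TSN Ack is older than our current ctsn_ack_point instead of escalating it. TSN ordering is decided with 32-bit serial arithmetic; the helper below is only an illustration of the comparison these checks are assumed to use (a stand-in for the kernel's TSN_lt, not code taken from this patch; kernel __u32 from <linux/types.h> assumed).

    /* Serial-number comparison: s is "older than" t when (s - t),
     * taken modulo 2^32, has its sign bit set.
     */
    static inline int tsn_lt(__u32 s, __u32 t)
    {
            return (s - t) & (1U << 31);
    }

    /* Example across a wrap: tsn_lt(0xfffffff0, 0x00000010) is non-zero,
     * so a stale SHUTDOWN carrying 0xfffffff0 after we have acked up to
     * 0x00000010 is simply dropped; tsn_lt(0x00000010, 0xfffffff0) is 0,
     * so a genuinely newer value is processed as before.
     */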
@@ -2624,6 +2640,7 @@ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(const struct sctp_endpoint *ep, | |||
2624 | { | 2640 | { |
2625 | struct sctp_chunk *chunk = arg; | 2641 | struct sctp_chunk *chunk = arg; |
2626 | sctp_shutdownhdr_t *sdh; | 2642 | sctp_shutdownhdr_t *sdh; |
2643 | __u32 ctsn; | ||
2627 | 2644 | ||
2628 | if (!sctp_vtag_verify(chunk, asoc)) | 2645 | if (!sctp_vtag_verify(chunk, asoc)) |
2629 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | 2646 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); |
@@ -2635,12 +2652,19 @@ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(const struct sctp_endpoint *ep, | |||
2635 | commands); | 2652 | commands); |
2636 | 2653 | ||
2637 | sdh = (sctp_shutdownhdr_t *)chunk->skb->data; | 2654 | sdh = (sctp_shutdownhdr_t *)chunk->skb->data; |
2655 | ctsn = ntohl(sdh->cum_tsn_ack); | ||
2656 | |||
2657 | if (TSN_lt(ctsn, asoc->ctsn_ack_point)) { | ||
2658 | SCTP_DEBUG_PRINTK("ctsn %x\n", ctsn); | ||
2659 | SCTP_DEBUG_PRINTK("ctsn_ack_point %x\n", asoc->ctsn_ack_point); | ||
2660 | return SCTP_DISPOSITION_DISCARD; | ||
2661 | } | ||
2638 | 2662 | ||
2639 | /* If Cumulative TSN Ack beyond the max tsn currently | 2663 | /* If Cumulative TSN Ack beyond the max tsn currently |
2640 | * send, terminating the association and respond to the | 2664 | * send, terminating the association and respond to the |
2641 | * sender with an ABORT. | 2665 | * sender with an ABORT. |
2642 | */ | 2666 | */ |
2643 | if (!TSN_lt(ntohl(sdh->cum_tsn_ack), asoc->next_tsn)) | 2667 | if (!TSN_lt(ctsn, asoc->next_tsn)) |
2644 | return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands); | 2668 | return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands); |
2645 | 2669 | ||
2646 | /* verify, by checking the Cumulative TSN Ack field of the | 2670 | /* verify, by checking the Cumulative TSN Ack field of the |
@@ -2867,6 +2891,9 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep, | |||
2867 | goto discard_force; | 2891 | goto discard_force; |
2868 | case SCTP_IERROR_NO_DATA: | 2892 | case SCTP_IERROR_NO_DATA: |
2869 | goto consume; | 2893 | goto consume; |
2894 | case SCTP_IERROR_PROTO_VIOLATION: | ||
2895 | return sctp_sf_abort_violation(ep, asoc, chunk, commands, | ||
2896 | (u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t)); | ||
2870 | default: | 2897 | default: |
2871 | BUG(); | 2898 | BUG(); |
2872 | } | 2899 | } |
@@ -2977,6 +3004,9 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const struct sctp_endpoint *ep, | |||
2977 | break; | 3004 | break; |
2978 | case SCTP_IERROR_NO_DATA: | 3005 | case SCTP_IERROR_NO_DATA: |
2979 | goto consume; | 3006 | goto consume; |
3007 | case SCTP_IERROR_PROTO_VIOLATION: | ||
3008 | return sctp_sf_abort_violation(ep, asoc, chunk, commands, | ||
3009 | (u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t)); | ||
2980 | default: | 3010 | default: |
2981 | BUG(); | 3011 | BUG(); |
2982 | } | 3012 | } |
@@ -3519,6 +3549,12 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep, | |||
3519 | asconf_ack = sctp_assoc_lookup_asconf_ack(asoc, hdr->serial); | 3549 | asconf_ack = sctp_assoc_lookup_asconf_ack(asoc, hdr->serial); |
3520 | if (!asconf_ack) | 3550 | if (!asconf_ack) |
3521 | return SCTP_DISPOSITION_DISCARD; | 3551 | return SCTP_DISPOSITION_DISCARD; |
3552 | |||
3553 | /* Reset the transport so that we select the correct one | ||
3554 | * this time around. This is to make sure that we don't | ||
3555 | * accidentally use a stale transport that's been removed. | ||
3556 | */ | ||
3557 | asconf_ack->transport = NULL; | ||
3522 | } else { | 3558 | } else { |
3523 | /* ADDIP 5.2 E5) Otherwise, the ASCONF Chunk is discarded since | 3559 | /* ADDIP 5.2 E5) Otherwise, the ASCONF Chunk is discarded since |
3524 | * it must be either a stale packet or from an attacker. | 3560 | * it must be either a stale packet or from an attacker. |
@@ -4546,9 +4582,9 @@ sctp_disposition_t sctp_sf_do_prm_send(const struct sctp_endpoint *ep, | |||
4546 | void *arg, | 4582 | void *arg, |
4547 | sctp_cmd_seq_t *commands) | 4583 | sctp_cmd_seq_t *commands) |
4548 | { | 4584 | { |
4549 | struct sctp_chunk *chunk = arg; | 4585 | struct sctp_datamsg *msg = arg; |
4550 | 4586 | ||
4551 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(chunk)); | 4587 | sctp_add_cmd_sf(commands, SCTP_CMD_SEND_MSG, SCTP_DATAMSG(msg)); |
4552 | return SCTP_DISPOSITION_CONSUME; | 4588 | return SCTP_DISPOSITION_CONSUME; |
4553 | } | 4589 | } |
4554 | 4590 | ||
@@ -5847,6 +5883,9 @@ static int sctp_eat_data(const struct sctp_association *asoc, | |||
5847 | __u32 tsn; | 5883 | __u32 tsn; |
5848 | struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map; | 5884 | struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map; |
5849 | struct sock *sk = asoc->base.sk; | 5885 | struct sock *sk = asoc->base.sk; |
5886 | u16 ssn; | ||
5887 | u16 sid; | ||
5888 | u8 ordered = 0; | ||
5850 | 5889 | ||
5851 | data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data; | 5890 | data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data; |
5852 | skb_pull(chunk->skb, sizeof(sctp_datahdr_t)); | 5891 | skb_pull(chunk->skb, sizeof(sctp_datahdr_t)); |
@@ -5986,8 +6025,10 @@ static int sctp_eat_data(const struct sctp_association *asoc, | |||
5986 | */ | 6025 | */ |
5987 | if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) | 6026 | if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) |
5988 | SCTP_INC_STATS(SCTP_MIB_INUNORDERCHUNKS); | 6027 | SCTP_INC_STATS(SCTP_MIB_INUNORDERCHUNKS); |
5989 | else | 6028 | else { |
5990 | SCTP_INC_STATS(SCTP_MIB_INORDERCHUNKS); | 6029 | SCTP_INC_STATS(SCTP_MIB_INORDERCHUNKS); |
6030 | ordered = 1; | ||
6031 | } | ||
5991 | 6032 | ||
5992 | /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number | 6033 | /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number |
5993 | * | 6034 | * |
@@ -5997,7 +6038,8 @@ static int sctp_eat_data(const struct sctp_association *asoc, | |||
5997 | * with cause set to "Invalid Stream Identifier" (See Section 3.3.10) | 6038 | * with cause set to "Invalid Stream Identifier" (See Section 3.3.10) |
5998 | * and discard the DATA chunk. | 6039 | * and discard the DATA chunk. |
5999 | */ | 6040 | */ |
6000 | if (ntohs(data_hdr->stream) >= asoc->c.sinit_max_instreams) { | 6041 | sid = ntohs(data_hdr->stream); |
6042 | if (sid >= asoc->c.sinit_max_instreams) { | ||
6001 | /* Mark tsn as received even though we drop it */ | 6043 | /* Mark tsn as received even though we drop it */ |
6002 | sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn)); | 6044 | sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn)); |
6003 | 6045 | ||
@@ -6010,6 +6052,18 @@ static int sctp_eat_data(const struct sctp_association *asoc, | |||
6010 | return SCTP_IERROR_BAD_STREAM; | 6052 | return SCTP_IERROR_BAD_STREAM; |
6011 | } | 6053 | } |
6012 | 6054 | ||
6055 | /* Check to see if the SSN is possible for this TSN. | ||
6056 | * The biggest gap we can record is 4K wide. Since SSNs wrap | ||
6057 | * at an unsigned short, there is no way that an SSN can | ||
6058 | * wrap and still be valid for this TSN. We can simply check if the current | ||
6059 | * SSN is smaller than the next expected one. If it is, it wrapped | ||
6060 | * and is invalid. | ||
6061 | */ | ||
6062 | ssn = ntohs(data_hdr->ssn); | ||
6063 | if (ordered && SSN_lt(ssn, sctp_ssn_peek(&asoc->ssnmap->in, sid))) { | ||
6064 | return SCTP_IERROR_PROTO_VIOLATION; | ||
6065 | } | ||
6066 | |||
6013 | /* Send the data up to the user. Note: Schedule the | 6067 | /* Send the data up to the user. Note: Schedule the |
6014 | * SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK | 6068 | * SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK |
6015 | * chunk needs the updated rwnd. | 6069 | * chunk needs the updated rwnd. |
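The SSN sanity check added to sctp_eat_data() leans on the same serial-arithmetic idea, only over 16 bits, together with sctp_ssn_peek() returning the next SSN expected on that stream. Below is a rough illustration of the reasoning in the comment above; the comparison helper is a stand-in for SSN_lt and is an assumption, not code from this patch (kernel __u16 from <linux/types.h> assumed).

    /* 16-bit serial comparison, assumed to mirror SSN_lt(). */
    static inline int ssn_lt(__u16 s, __u16 t)
    {
            return (__u16)(s - t) & (1U << 15);
    }

    /* The TSN map only spans a 4K-wide gap, so no legitimate ordered
     * chunk can carry an SSN behind the next expected one.  Example:
     * the next expected SSN on the stream is 100; an ordered chunk
     * arriving with SSN 65530 gives ssn_lt(65530, 100) != 0 and is
     * rejected as a protocol violation instead of being queued for
     * reordering forever.
     */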
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 971890dbfea0..89af37a6c871 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -1361,6 +1361,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout) | |||
1361 | 1361 | ||
1362 | sctp_lock_sock(sk); | 1362 | sctp_lock_sock(sk); |
1363 | sk->sk_shutdown = SHUTDOWN_MASK; | 1363 | sk->sk_shutdown = SHUTDOWN_MASK; |
1364 | sk->sk_state = SCTP_SS_CLOSING; | ||
1364 | 1365 | ||
1365 | ep = sctp_sk(sk)->ep; | 1366 | ep = sctp_sk(sk)->ep; |
1366 | 1367 | ||
@@ -1813,20 +1814,22 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
1813 | sctp_set_owner_w(chunk); | 1814 | sctp_set_owner_w(chunk); |
1814 | 1815 | ||
1815 | chunk->transport = chunk_tp; | 1816 | chunk->transport = chunk_tp; |
1816 | |||
1817 | /* Send it to the lower layers. Note: all chunks | ||
1818 | * must either fail or succeed. The lower layer | ||
1819 | * works that way today. Keep it that way or this | ||
1820 | * breaks. | ||
1821 | */ | ||
1822 | err = sctp_primitive_SEND(asoc, chunk); | ||
1823 | /* Did the lower layer accept the chunk? */ | ||
1824 | if (err) | ||
1825 | sctp_chunk_free(chunk); | ||
1826 | SCTP_DEBUG_PRINTK("We sent primitively.\n"); | ||
1827 | } | 1817 | } |
1828 | 1818 | ||
1829 | sctp_datamsg_put(datamsg); | 1819 | /* Send it to the lower layers. Note: all chunks |
1820 | * must either fail or succeed. The lower layer | ||
1821 | * works that way today. Keep it that way or this | ||
1822 | * breaks. | ||
1823 | */ | ||
1824 | err = sctp_primitive_SEND(asoc, datamsg); | ||
1825 | /* Did the lower layer accept the chunk? */ | ||
1826 | if (err) | ||
1827 | sctp_datamsg_free(datamsg); | ||
1828 | else | ||
1829 | sctp_datamsg_put(datamsg); | ||
1830 | |||
1831 | SCTP_DEBUG_PRINTK("We sent primitively.\n"); | ||
1832 | |||
1830 | if (err) | 1833 | if (err) |
1831 | goto out_free; | 1834 | goto out_free; |
1832 | else | 1835 | else |
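With sendmsg() now handing the whole sctp_datamsg to the SEND primitive, error handling moves from per-chunk to per-message. The sketch below restates the new ownership rule with explanatory comments; it assumes sctp_datamsg_free() also drops the fragment references while sctp_datamsg_put() only releases the caller's reference.

    err = sctp_primitive_SEND(asoc, datamsg);
    if (err)
            /* Nothing was queued: drop the message and, with it,
             * every fragment chunk (assumed behaviour of _free).
             */
            sctp_datamsg_free(datamsg);
    else
            /* The fragments now belong to the outqueue; only release
             * the reference sendmsg() was holding.
             */
            sctp_datamsg_put(datamsg);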
@@ -2240,7 +2243,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, | |||
2240 | sctp_assoc_sync_pmtu(asoc); | 2243 | sctp_assoc_sync_pmtu(asoc); |
2241 | } else if (asoc) { | 2244 | } else if (asoc) { |
2242 | asoc->pathmtu = params->spp_pathmtu; | 2245 | asoc->pathmtu = params->spp_pathmtu; |
2243 | sctp_frag_point(sp, params->spp_pathmtu); | 2246 | sctp_frag_point(asoc, params->spp_pathmtu); |
2244 | } else { | 2247 | } else { |
2245 | sp->pathmtu = params->spp_pathmtu; | 2248 | sp->pathmtu = params->spp_pathmtu; |
2246 | } | 2249 | } |
@@ -2877,15 +2880,10 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, int optl | |||
2877 | val -= sizeof(struct sctphdr) + | 2880 | val -= sizeof(struct sctphdr) + |
2878 | sizeof(struct sctp_data_chunk); | 2881 | sizeof(struct sctp_data_chunk); |
2879 | } | 2882 | } |
2880 | 2883 | asoc->user_frag = val; | |
2881 | asoc->frag_point = val; | 2884 | asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu); |
2882 | } else { | 2885 | } else { |
2883 | sp->user_frag = val; | 2886 | sp->user_frag = val; |
2884 | |||
2885 | /* Update the frag_point of the existing associations. */ | ||
2886 | list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { | ||
2887 | asoc->frag_point = sctp_frag_point(sp, asoc->pathmtu); | ||
2888 | } | ||
2889 | } | 2887 | } |
2890 | 2888 | ||
2891 | return 0; | 2889 | return 0; |
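SCTP_MAXSEG on an association now just records user_frag and recomputes frag_point via sctp_frag_point(asoc, asoc->pathmtu), so the effective fragment size always tracks the current path MTU. The helper below is a hypothetical sketch of what that derivation is assumed to look like; the overhead terms and the clamp to the user's request are assumptions for illustration, not the function body from this patch.

    /* Hypothetical: largest DATA payload per fragment for a given MTU. */
    static __u32 compute_frag_point(__u32 pmtu, __u32 user_frag,
                                    __u32 ip_header_len)
    {
            __u32 frag = pmtu;

            /* Remove IP, SCTP and DATA-chunk header overhead. */
            frag -= ip_header_len;
            frag -= sizeof(struct sctphdr) + sizeof(struct sctp_data_chunk);

            /* Never exceed what the user asked for with SCTP_MAXSEG. */
            if (user_frag && user_frag < frag)
                    frag = user_frag;

            return frag;
    }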
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index 63eabbc71298..ab7151da120f 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c | |||
@@ -51,6 +51,7 @@ static int timer_max = 86400000; /* ms in one day */ | |||
51 | static int int_max = INT_MAX; | 51 | static int int_max = INT_MAX; |
52 | static int sack_timer_min = 1; | 52 | static int sack_timer_min = 1; |
53 | static int sack_timer_max = 500; | 53 | static int sack_timer_max = 500; |
54 | static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */ | ||
54 | 55 | ||
55 | extern int sysctl_sctp_mem[3]; | 56 | extern int sysctl_sctp_mem[3]; |
56 | extern int sysctl_sctp_rmem[3]; | 57 | extern int sysctl_sctp_rmem[3]; |
@@ -272,6 +273,17 @@ static ctl_table sctp_table[] = { | |||
272 | .proc_handler = proc_dointvec, | 273 | .proc_handler = proc_dointvec, |
273 | .strategy = sysctl_intvec | 274 | .strategy = sysctl_intvec |
274 | }, | 275 | }, |
276 | { | ||
277 | .ctl_name = CTL_UNNUMBERED, | ||
278 | .procname = "addr_scope_policy", | ||
279 | .data = &sctp_scope_policy, | ||
280 | .maxlen = sizeof(int), | ||
281 | .mode = 0644, | ||
282 | .proc_handler = &proc_dointvec_minmax, | ||
283 | .strategy = &sysctl_intvec, | ||
284 | .extra1 = &zero, | ||
285 | .extra2 = &addr_scope_max, | ||
286 | }, | ||
275 | { .ctl_name = 0 } | 287 | { .ctl_name = 0 } |
276 | }; | 288 | }; |
277 | 289 | ||
diff --git a/net/sctp/transport.c b/net/sctp/transport.c index e5dde45c79d3..c256e4839316 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c | |||
@@ -503,6 +503,9 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport, | |||
503 | transport->ssthresh = max(transport->cwnd/2, | 503 | transport->ssthresh = max(transport->cwnd/2, |
504 | 4*transport->asoc->pathmtu); | 504 | 4*transport->asoc->pathmtu); |
505 | transport->cwnd = transport->asoc->pathmtu; | 505 | transport->cwnd = transport->asoc->pathmtu; |
506 | |||
507 | /* T3-rtx also clears fast recovery on the transport */ | ||
508 | transport->fast_recovery = 0; | ||
506 | break; | 509 | break; |
507 | 510 | ||
508 | case SCTP_LOWER_CWND_FAST_RTX: | 511 | case SCTP_LOWER_CWND_FAST_RTX: |