author     David S. Miller <davem@sunset.davemloft.net>   2007-08-31 01:11:31 -0400
committer  David S. Miller <davem@sunset.davemloft.net>   2007-08-31 01:11:31 -0400
commit     b91ddd843751947e2f81dfc8a86c5c21cbe07158 (patch)
tree       abeab3c0fa11e4295c889951bae407317b7e7eda /net
parent     05bb1fad1cde025a864a90cfeb98dcbefe78a44a (diff)
parent     cb243a1a9fef4aaff262a5dd14f987070d37229b (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/vxy/lksctp-dev
Diffstat (limited to 'net')
-rw-r--r--  net/sctp/associola.c       7
-rw-r--r--  net/sctp/outqueue.c        7
-rw-r--r--  net/sctp/sm_make_chunk.c   112
-rw-r--r--  net/sctp/sm_sideeffect.c   8
-rw-r--r--  net/sctp/sm_statefuns.c    51
-rw-r--r--  net/sctp/socket.c          3
-rw-r--r--  net/sctp/ulpqueue.c        75
7 files changed, 187 insertions, 76 deletions
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 498edb0cd4e..2ad1caf1ea4 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -727,7 +727,12 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
                 break;
 
         case SCTP_TRANSPORT_DOWN:
-                transport->state = SCTP_INACTIVE;
+                /* if the transort was never confirmed, do not transition it
+                 * to inactive state.
+                 */
+                if (transport->state != SCTP_UNCONFIRMED)
+                        transport->state = SCTP_INACTIVE;
+
                 spc_state = SCTP_ADDR_UNREACHABLE;
                 break;
 
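For context on the hunk above: a transport that was never confirmed (SCTP_UNCONFIRMED) is now left in that state when a down event is reported, instead of being demoted to SCTP_INACTIVE. A minimal, self-contained sketch of that guard, with the state enum reduced to the three values involved (names mirror the kernel's, everything else is simplified for illustration):

#include <stdio.h>

enum transport_state { SCTP_UNCONFIRMED, SCTP_INACTIVE, SCTP_ACTIVE };

/* Simplified model of the SCTP_TRANSPORT_DOWN case shown in the hunk. */
static enum transport_state on_transport_down(enum transport_state state)
{
        if (state != SCTP_UNCONFIRMED)
                state = SCTP_INACTIVE;
        return state;
}

int main(void)
{
        printf("%d\n", on_transport_down(SCTP_ACTIVE));      /* becomes SCTP_INACTIVE */
        printf("%d\n", on_transport_down(SCTP_UNCONFIRMED)); /* stays SCTP_UNCONFIRMED */
        return 0;
}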
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 992f361084b..28f4fe77cee 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -421,6 +421,13 @@ void sctp_retransmit_mark(struct sctp_outq *q,
                  */
                 if ((fast_retransmit && (chunk->fast_retransmit > 0)) ||
                     (!fast_retransmit && !chunk->tsn_gap_acked)) {
+                        /* If this chunk was sent less then 1 rto ago, do not
+                         * retransmit this chunk, but give the peer time
+                         * to acknowlege it.
+                         */
+                        if ((jiffies - chunk->sent_at) < transport->rto)
+                                continue;
+
                         /* RFC 2960 6.2.1 Processing a Received SACK
                          *
                          * C) Any time a DATA chunk is marked for
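The new check above holds off retransmission of any chunk that was sent less than one RTO ago, so the peer still has a chance to SACK it. A small sketch of the same elapsed-time test, relying on the kernel's convention that jiffies arithmetic is done on unsigned values so the subtraction stays correct across wraparound (the values here are illustrative only):

#include <stdio.h>

/* Illustrative stand-ins for jiffies, the chunk timestamp and the RTO. */
static int too_soon_to_retransmit(unsigned long now, unsigned long sent_at,
                                  unsigned long rto)
{
        /* unsigned subtraction yields the elapsed ticks even if 'now' wrapped */
        return (now - sent_at) < rto;
}

int main(void)
{
        printf("%d\n", too_soon_to_retransmit(1000, 900, 300)); /* 1: wait for a SACK */
        printf("%d\n", too_soon_to_retransmit(1000, 600, 300)); /* 0: ok to retransmit */
        return 0;
}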
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 51c4d7fef1d..79856c92452 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -110,7 +110,7 @@ static const struct sctp_paramhdr prsctp_param = {
  * abort chunk.
  */
 void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
-                     const void *payload, size_t paylen)
+                     size_t paylen)
 {
         sctp_errhdr_t err;
         __u16 len;
@@ -120,7 +120,6 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
         len = sizeof(sctp_errhdr_t) + paylen;
         err.length = htons(len);
         chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
-        sctp_addto_chunk(chunk, paylen, payload);
 }
 
 /* 3.3.2 Initiation (INIT) (1)
@@ -780,8 +779,8 @@ struct sctp_chunk *sctp_make_abort_no_data(
 
         /* Put the tsn back into network byte order. */
         payload = htonl(tsn);
-        sctp_init_cause(retval, SCTP_ERROR_NO_DATA, (const void *)&payload,
-                        sizeof(payload));
+        sctp_init_cause(retval, SCTP_ERROR_NO_DATA, sizeof(payload));
+        sctp_addto_chunk(retval, sizeof(payload), (const void *)&payload);
 
         /* RFC 2960 6.4 Multi-homed SCTP Endpoints
          *
@@ -823,7 +822,8 @@ struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc,
                 goto err_copy;
         }
 
-        sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, payload, paylen);
+        sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, paylen);
+        sctp_addto_chunk(retval, paylen, payload);
 
         if (paylen)
                 kfree(payload);
@@ -850,15 +850,17 @@ struct sctp_chunk *sctp_make_abort_violation(
         struct sctp_paramhdr phdr;
 
         retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen
-                                        + sizeof(sctp_chunkhdr_t));
+                                        + sizeof(sctp_paramhdr_t));
         if (!retval)
                 goto end;
 
-        sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, payload, paylen);
+        sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, paylen
+                                        + sizeof(sctp_paramhdr_t));
 
         phdr.type = htons(chunk->chunk_hdr->type);
         phdr.length = chunk->chunk_hdr->length;
-        sctp_addto_chunk(retval, sizeof(sctp_paramhdr_t), &phdr);
+        sctp_addto_chunk(retval, paylen, payload);
+        sctp_addto_param(retval, sizeof(sctp_paramhdr_t), &phdr);
 
 end:
         return retval;
@@ -955,7 +957,8 @@ struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
         if (!retval)
                 goto nodata;
 
-        sctp_init_cause(retval, cause_code, payload, paylen);
+        sctp_init_cause(retval, cause_code, paylen);
+        sctp_addto_chunk(retval, paylen, payload);
 
 nodata:
         return retval;
@@ -1128,7 +1131,7 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
         void *target;
         void *padding;
         int chunklen = ntohs(chunk->chunk_hdr->length);
-        int padlen = chunklen % 4;
+        int padlen = WORD_ROUND(chunklen) - chunklen;
 
         padding = skb_put(chunk->skb, padlen);
         target = skb_put(chunk->skb, len);
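The one-line change to sctp_addto_chunk() above fixes the padding arithmetic: chunklen % 4 is the remainder, not the number of bytes needed to reach the next 4-byte boundary, so a chunk length of 5 would have been padded with 1 byte instead of 3. A self-contained sketch of the corrected expression (WORD_ROUND is assumed to round a length up to a multiple of 4, matching its use elsewhere in the SCTP code):

#include <stdio.h>

/* Round a length up to the next 4-byte boundary, as WORD_ROUND does. */
#define WORD_ROUND(s) (((s) + 3) & ~3)

int main(void)
{
        for (int chunklen = 4; chunklen <= 8; chunklen++) {
                int old_padlen = chunklen % 4;                    /* buggy */
                int new_padlen = WORD_ROUND(chunklen) - chunklen; /* fixed */
                printf("len %d: old pad %d, new pad %d\n",
                       chunklen, old_padlen, new_padlen);
        }
        return 0;       /* e.g. len 5: old pad 1, new pad 3 */
}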
@@ -1143,6 +1146,25 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
         return target;
 }
 
+/* Append bytes to the end of a parameter. Will panic if chunk is not big
+ * enough.
+ */
+void *sctp_addto_param(struct sctp_chunk *chunk, int len, const void *data)
+{
+        void *target;
+        int chunklen = ntohs(chunk->chunk_hdr->length);
+
+        target = skb_put(chunk->skb, len);
+
+        memcpy(target, data, len);
+
+        /* Adjust the chunk length field. */
+        chunk->chunk_hdr->length = htons(chunklen + len);
+        chunk->chunk_end = skb_tail_pointer(chunk->skb);
+
+        return target;
+}
+
 /* Append bytes from user space to the end of a chunk. Will panic if
  * chunk is not big enough.
  * Returns a kernel err value.
@@ -1174,25 +1196,36 @@ out:
  */
 void sctp_chunk_assign_ssn(struct sctp_chunk *chunk)
 {
+        struct sctp_datamsg *msg;
+        struct sctp_chunk *lchunk;
+        struct sctp_stream *stream;
         __u16 ssn;
         __u16 sid;
 
         if (chunk->has_ssn)
                 return;
 
-        /* This is the last possible instant to assign a SSN. */
-        if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
-                ssn = 0;
-        } else {
-                sid = ntohs(chunk->subh.data_hdr->stream);
-                if (chunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
-                        ssn = sctp_ssn_next(&chunk->asoc->ssnmap->out, sid);
-                else
-                        ssn = sctp_ssn_peek(&chunk->asoc->ssnmap->out, sid);
-        }
+        /* All fragments will be on the same stream */
+        sid = ntohs(chunk->subh.data_hdr->stream);
+        stream = &chunk->asoc->ssnmap->out;
 
-        chunk->subh.data_hdr->ssn = htons(ssn);
-        chunk->has_ssn = 1;
+        /* Now assign the sequence number to the entire message.
+         * All fragments must have the same stream sequence number.
+         */
+        msg = chunk->msg;
+        list_for_each_entry(lchunk, &msg->chunks, frag_list) {
+                if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
+                        ssn = 0;
+                } else {
+                        if (lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
+                                ssn = sctp_ssn_next(stream, sid);
+                        else
+                                ssn = sctp_ssn_peek(stream, sid);
+                }
+
+                lchunk->subh.data_hdr->ssn = htons(ssn);
+                lchunk->has_ssn = 1;
+        }
 }
 
 /* Helper function to assign a TSN if needed. This assumes that both
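The rewritten sctp_chunk_assign_ssn() above walks the whole message so every fragment carries the same stream sequence number, and the per-stream counter only advances when the last fragment is reached. A toy model of that walk, with sctp_ssn_peek()/sctp_ssn_next() reduced to a single counter (the real code keys them by stream id inside the association's ssnmap):

#include <stdio.h>

#define LAST_FRAG 0x01

static unsigned short next_ssn;         /* per-stream counter in the real code */

static unsigned short ssn_peek(void) { return next_ssn; }
static unsigned short ssn_next(void) { return next_ssn++; }

int main(void)
{
        int flags[] = { 0, 0, LAST_FRAG,        /* message A: three fragments */
                        LAST_FRAG };            /* message B: unfragmented    */

        for (unsigned i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) {
                unsigned short ssn = (flags[i] & LAST_FRAG) ? ssn_next()
                                                            : ssn_peek();
                printf("fragment %u gets ssn %u\n", i, ssn);
        }
        return 0;       /* fragments 0-2 all get ssn 0, fragment 3 gets ssn 1 */
}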
@@ -1466,7 +1499,8 @@ no_hmac:
                 __be32 n = htonl(usecs);
 
                 sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE,
-                                &n, sizeof(n));
+                                sizeof(n));
+                sctp_addto_chunk(*errp, sizeof(n), &n);
                 *error = -SCTP_IERROR_STALE_COOKIE;
         } else
                 *error = -SCTP_IERROR_NOMEM;
@@ -1556,7 +1590,8 @@ static int sctp_process_missing_param(const struct sctp_association *asoc,
                 report.num_missing = htonl(1);
                 report.type = paramtype;
                 sctp_init_cause(*errp, SCTP_ERROR_MISS_PARAM,
-                                &report, sizeof(report));
+                                sizeof(report));
+                sctp_addto_chunk(*errp, sizeof(report), &report);
         }
 
         /* Stop processing this chunk. */
@@ -1574,7 +1609,7 @@ static int sctp_process_inv_mandatory(const struct sctp_association *asoc,
         *errp = sctp_make_op_error_space(asoc, chunk, 0);
 
         if (*errp)
-                sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, NULL, 0);
+                sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, 0);
 
         /* Stop processing this chunk. */
         return 0;
@@ -1595,9 +1630,10 @@ static int sctp_process_inv_paramlength(const struct sctp_association *asoc,
         *errp = sctp_make_op_error_space(asoc, chunk, payload_len);
 
         if (*errp) {
-                sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION, error,
-                                sizeof(error));
-                sctp_addto_chunk(*errp, sizeof(sctp_paramhdr_t), param);
+                sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION,
+                                sizeof(error) + sizeof(sctp_paramhdr_t));
+                sctp_addto_chunk(*errp, sizeof(error), error);
+                sctp_addto_param(*errp, sizeof(sctp_paramhdr_t), param);
         }
 
         return 0;
@@ -1618,9 +1654,10 @@ static int sctp_process_hn_param(const struct sctp_association *asoc,
         if (!*errp)
                 *errp = sctp_make_op_error_space(asoc, chunk, len);
 
-        if (*errp)
-                sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED,
-                                param.v, len);
+        if (*errp) {
+                sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, len);
+                sctp_addto_chunk(*errp, len, param.v);
+        }
 
         /* Stop processing this chunk. */
         return 0;
@@ -1672,10 +1709,13 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
                 *errp = sctp_make_op_error_space(asoc, chunk,
                                 ntohs(chunk->chunk_hdr->length));
 
-                if (*errp)
-                        sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
-                                        param.v,
-                                        WORD_ROUND(ntohs(param.p->length)));
+                if (*errp) {
+                        sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
+                                        WORD_ROUND(ntohs(param.p->length)));
+                        sctp_addto_chunk(*errp,
+                                        WORD_ROUND(ntohs(param.p->length)),
+                                        param.v);
+                }
 
                 break;
         case SCTP_PARAM_ACTION_SKIP:
@@ -1690,8 +1730,10 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
 
                 if (*errp) {
                         sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
-                                        param.v,
                                         WORD_ROUND(ntohs(param.p->length)));
+                        sctp_addto_chunk(*errp,
+                                        WORD_ROUND(ntohs(param.p->length)),
+                                        param.v);
                 } else {
                         /* If there is no memory for generating the ERROR
                          * report as specified, an ABORT will be triggered
@@ -1791,7 +1833,7 @@ int sctp_verify_init(const struct sctp_association *asoc,
          * VIOLATION error. We build the ERROR chunk here and let the normal
          * error handling code build and send the packet.
          */
-        if (param.v < (void*)chunk->chunk_end - sizeof(sctp_paramhdr_t)) {
+        if (param.v != (void*)chunk->chunk_end) {
                 sctp_process_inv_paramlength(asoc, param.p, chunk, errp);
                 return 0;
         }
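The sctp_verify_init() change above tightens the end-of-parameters test: a well-formed INIT leaves the parameter walk cursor exactly at chunk_end, so anything else (falling short or overshooting) is now treated as a parameter-length violation. A toy TLV walk with the same terminal check (lengths kept in host byte order and the parameter contents made up purely for illustration):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WORD_ROUND(s) (((s) + 3) & ~3)

struct paramhdr { uint16_t type; uint16_t length; }; /* host order in this sketch */

int main(void)
{
        /* two parameters: 8 bytes, then 6 bytes padded on the wire to 8 */
        uint8_t buf[16];
        struct paramhdr p1 = { 1, 8 }, p2 = { 2, 6 };
        memcpy(buf, &p1, sizeof(p1));
        memcpy(buf + 8, &p2, sizeof(p2));

        uint8_t *v = buf, *end = buf + sizeof(buf);
        while (v + sizeof(struct paramhdr) <= end) {
                struct paramhdr p;
                memcpy(&p, v, sizeof(p));
                v += WORD_ROUND(p.length);      /* advance by the padded length */
        }
        printf(v == end ? "clean end of parameters\n"
                        : "protocol violation: bad parameter length\n");
        return 0;
}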
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index d9fad4f6ffc..8d789008349 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1013,8 +1013,9 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
                 break;
 
         case SCTP_DISPOSITION_VIOLATION:
-                printk(KERN_ERR "sctp protocol violation state %d "
-                       "chunkid %d\n", state, subtype.chunk);
+                if (net_ratelimit())
+                        printk(KERN_ERR "sctp protocol violation state %d "
+                               "chunkid %d\n", state, subtype.chunk);
                 break;
 
         case SCTP_DISPOSITION_NOT_IMPL:
@@ -1130,6 +1131,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
                 /* Move the Cumulattive TSN Ack ahead. */
                 sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);
 
+                /* purge the fragmentation queue */
+                sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32);
+
                 /* Abort any in progress partial delivery. */
                 sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
                 break;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 71cad56dd73..177528ed3e1 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -264,7 +264,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
         struct sctp_chunk *err_chunk;
         struct sctp_packet *packet;
         sctp_unrecognized_param_t *unk_param;
-        struct sock *sk;
         int len;
 
         /* 6.10 Bundling
@@ -285,16 +284,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
         if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
                 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
 
-        sk = ep->base.sk;
-        /* If the endpoint is not listening or if the number of associations
-         * on the TCP-style socket exceed the max backlog, respond with an
-         * ABORT.
-         */
-        if (!sctp_sstate(sk, LISTENING) ||
-            (sctp_style(sk, TCP) &&
-             sk_acceptq_is_full(sk)))
-                return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
-
         /* 3.1 A packet containing an INIT chunk MUST have a zero Verification
          * Tag.
          */
@@ -590,6 +579,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
         struct sctp_ulpevent *ev, *ai_ev = NULL;
         int error = 0;
         struct sctp_chunk *err_chk_p;
+        struct sock *sk;
 
         /* If the packet is an OOTB packet which is temporarily on the
          * control endpoint, respond with an ABORT.
@@ -605,6 +595,15 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
         if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
                 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
 
+        /* If the endpoint is not listening or if the number of associations
+         * on the TCP-style socket exceed the max backlog, respond with an
+         * ABORT.
+         */
+        sk = ep->base.sk;
+        if (!sctp_sstate(sk, LISTENING) ||
+            (sctp_style(sk, TCP) && sk_acceptq_is_full(sk)))
+                return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+
         /* "Decode" the chunk. We have no optional parameters so we
          * are in good shape.
          */
@@ -1032,19 +1031,21 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
         /* This should never happen, but lets log it if so. */
         if (unlikely(!link)) {
                 if (from_addr.sa.sa_family == AF_INET6) {
-                        printk(KERN_WARNING
-                               "%s association %p could not find address "
-                               NIP6_FMT "\n",
-                               __FUNCTION__,
-                               asoc,
-                               NIP6(from_addr.v6.sin6_addr));
+                        if (net_ratelimit())
+                                printk(KERN_WARNING
+                                       "%s association %p could not find address "
+                                       NIP6_FMT "\n",
+                                       __FUNCTION__,
+                                       asoc,
+                                       NIP6(from_addr.v6.sin6_addr));
                 } else {
-                        printk(KERN_WARNING
-                               "%s association %p could not find address "
-                               NIPQUAD_FMT "\n",
-                               __FUNCTION__,
-                               asoc,
-                               NIPQUAD(from_addr.v4.sin_addr.s_addr));
+                        if (net_ratelimit())
+                                printk(KERN_WARNING
+                                       "%s association %p could not find address "
+                                       NIPQUAD_FMT "\n",
+                                       __FUNCTION__,
+                                       asoc,
+                                       NIPQUAD(from_addr.v4.sin_addr.s_addr));
                 }
                 return SCTP_DISPOSITION_DISCARD;
         }
@@ -3362,7 +3363,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
                 abort = sctp_make_abort(asoc, asconf_ack,
                                         sizeof(sctp_errhdr_t));
                 if (abort) {
-                        sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, NULL, 0);
+                        sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, 0);
                         sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
                                         SCTP_CHUNK(abort));
                 }
@@ -3392,7 +3393,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
                 abort = sctp_make_abort(asoc, asconf_ack,
                                         sizeof(sctp_errhdr_t));
                 if (abort) {
-                        sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, NULL, 0);
+                        sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
                         sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
                                         SCTP_CHUNK(abort));
                 }
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 01c6364245b..33354602ae8 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -353,6 +353,7 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
          * The function sctp_get_port_local() does duplicate address
          * detection.
          */
+        addr->v4.sin_port = htons(snum);
         if ((ret = sctp_get_port_local(sk, addr))) {
                 if (ret == (long) sk) {
                         /* This endpoint has a conflicting address. */
@@ -5202,6 +5203,7 @@ SCTP_STATIC int sctp_seqpacket_listen(struct sock *sk, int backlog)
 
                 sctp_unhash_endpoint(ep);
                 sk->sk_state = SCTP_SS_CLOSED;
+                return 0;
         }
 
         /* Return if we are already listening. */
@@ -5249,6 +5251,7 @@ SCTP_STATIC int sctp_stream_listen(struct sock *sk, int backlog)
 
                 sctp_unhash_endpoint(ep);
                 sk->sk_state = SCTP_SS_CLOSED;
+                return 0;
         }
 
         if (sctp_sstate(sk, LISTENING))
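Both listen handlers above now return 0 right after closing the endpoint when a zero backlog is passed, instead of falling through into the normal listen path. From user space that is the usual way to stop accepting new associations on an lksctp socket; a minimal sketch, with error checking omitted and the port number made up:

#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
        struct sockaddr_in addr;

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_port = htons(9999);            /* illustrative port */
        addr.sin_addr.s_addr = htonl(INADDR_ANY);
        bind(fd, (struct sockaddr *)&addr, sizeof(addr));

        listen(fd, 5);  /* start accepting new associations */
        /* ... */
        listen(fd, 0);  /* backlog 0: close the listening endpoint again */
        return 0;
}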
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 34eb977a204..fa0ba2a5564 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -659,6 +659,46 @@ done:
         return retval;
 }
 
+/*
+ * Flush out stale fragments from the reassembly queue when processing
+ * a Forward TSN.
+ *
+ * RFC 3758, Section 3.6
+ *
+ * After receiving and processing a FORWARD TSN, the data receiver MUST
+ * take cautions in updating its re-assembly queue. The receiver MUST
+ * remove any partially reassembled message, which is still missing one
+ * or more TSNs earlier than or equal to the new cumulative TSN point.
+ * In the event that the receiver has invoked the partial delivery API,
+ * a notification SHOULD also be generated to inform the upper layer API
+ * that the message being partially delivered will NOT be completed.
+ */
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
+{
+        struct sk_buff *pos, *tmp;
+        struct sctp_ulpevent *event;
+        __u32 tsn;
+
+        if (skb_queue_empty(&ulpq->reasm))
+                return;
+
+        skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
+                event = sctp_skb2event(pos);
+                tsn = event->tsn;
+
+                /* Since the entire message must be abandoned by the
+                 * sender (item A3 in Section 3.5, RFC 3758), we can
+                 * free all fragments on the list that are less then
+                 * or equal to ctsn_point
+                 */
+                if (TSN_lte(tsn, fwd_tsn)) {
+                        __skb_unlink(pos, &ulpq->reasm);
+                        sctp_ulpevent_free(event);
+                } else
+                        break;
+        }
+}
+
 /* Helper function to gather skbs that have possibly become
  * ordered by an an incoming chunk.
  */
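sctp_ulpq_reasm_flushtsn() above decides what to drop with TSN_lte(), which compares 32-bit TSNs in serial-number arithmetic so the test stays correct when the TSN space wraps. A minimal sketch of that comparison, written here from the usual definition rather than copied from the kernel headers:

#include <stdint.h>
#include <stdio.h>

/* "a <= b" in 32-bit serial-number arithmetic, as TSN_lte() does. */
static int tsn_lte(uint32_t a, uint32_t b)
{
        return a == b || (int32_t)(a - b) < 0;
}

int main(void)
{
        printf("%d\n", tsn_lte(5, 10));          /* 1 */
        printf("%d\n", tsn_lte(0xfffffff0u, 4)); /* 1: 0xfffffff0 is "before" 4 */
        printf("%d\n", tsn_lte(10, 5));          /* 0 */
        return 0;
}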
@@ -794,7 +834,7 @@ static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
 /* Helper function to gather skbs that have possibly become
  * ordered by forward tsn skipping their dependencies.
  */
-static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
+static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
 {
         struct sk_buff *pos, *tmp;
         struct sctp_ulpevent *cevent;
@@ -813,31 +853,40 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
                 csid = cevent->stream;
                 cssn = cevent->ssn;
 
-                if (cssn != sctp_ssn_peek(in, csid))
+                /* Have we gone too far? */
+                if (csid > sid)
                         break;
 
-                /* Found it, so mark in the ssnmap. */
-                sctp_ssn_next(in, csid);
+                /* Have we not gone far enough? */
+                if (csid < sid)
+                        continue;
+
+                /* see if this ssn has been marked by skipping */
+                if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
+                        break;
 
                 __skb_unlink(pos, &ulpq->lobby);
-                if (!event) {
+                if (!event)
                         /* Create a temporary list to collect chunks on. */
                         event = sctp_skb2event(pos);
-                        __skb_queue_tail(&temp, sctp_event2skb(event));
-                } else {
-                        /* Attach all gathered skbs to the event. */
-                        __skb_queue_tail(&temp, pos);
-                }
+
+                /* Attach all gathered skbs to the event. */
+                __skb_queue_tail(&temp, pos);
         }
 
         /* Send event to the ULP. 'event' is the sctp_ulpevent for
          * very first SKB on the 'temp' list.
          */
-        if (event)
+        if (event) {
+                /* see if we have more ordered that we can deliver */
+                sctp_ulpq_retrieve_ordered(ulpq, event);
                 sctp_ulpq_tail_event(ulpq, event);
+        }
 }
 
-/* Skip over an SSN. */
+/* Skip over an SSN. This is used during the processing of
+ * Forwared TSN chunk to skip over the abandoned ordered data
+ */
 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 {
         struct sctp_stream *in;
@@ -855,7 +904,7 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
         /* Go find any other chunks that were waiting for
          * ordering and deliver them if needed.
          */
-        sctp_ulpq_reap_ordered(ulpq);
+        sctp_ulpq_reap_ordered(ulpq, sid);
         return;
 }
 