Diffstat (limited to 'net/sctp/outqueue.c')
-rw-r--r--   net/sctp/outqueue.c   65
1 file changed, 35 insertions, 30 deletions
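
For readers skimming the patch: it replaces the old ad hoc integer markers on chunk->fast_retransmit (0 = not yet marked, 1 = marked for fast retransmit, -1 = ineligible after retransmission) with named states. Below is a minimal sketch of that tri-state, assuming a plain enum; the names come from the patch itself, but the explicit values and the standalone definition are illustrative only, not a copy of the kernel header:

/* Illustrative only: the three fast-retransmit states used by this patch.
 * The real definition lives in the SCTP headers; values are assumed here
 * so the snippet stands alone.
 */
enum {
	SCTP_CAN_FRTX  = 0,	/* chunk may still be marked for fast rtx  */
	SCTP_NEED_FRTX = 1,	/* chunk is marked and awaits fast rtx     */
	SCTP_DONT_FRTX = 2,	/* chunk already fast-rtx'd, don't repeat  */
};
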
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 4328ad5439c9..247ebc95c1e5 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -420,7 +420,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
 		 * be added to the retransmit queue.
 		 */
 		if ((reason == SCTP_RTXR_FAST_RTX &&
-		    (chunk->fast_retransmit > 0)) ||
+		    (chunk->fast_retransmit == SCTP_NEED_FRTX)) ||
 		    (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) {
 			/* If this chunk was sent less then 1 rto ago, do not
 			 * retransmit this chunk, but give the peer time
@@ -650,8 +650,8 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
 		/* Mark the chunk as ineligible for fast retransmit
 		 * after it is retransmitted.
 		 */
-		if (chunk->fast_retransmit > 0)
-			chunk->fast_retransmit = -1;
+		if (chunk->fast_retransmit == SCTP_NEED_FRTX)
+			chunk->fast_retransmit = SCTP_DONT_FRTX;
 
 		/* Force start T3-rtx timer when fast retransmitting
 		 * the earliest outstanding TSN
@@ -680,8 +680,8 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
 	 */
 	if (rtx_timeout || fast_rtx) {
 		list_for_each_entry(chunk1, lqueue, transmitted_list) {
-			if (chunk1->fast_retransmit > 0)
-				chunk1->fast_retransmit = -1;
+			if (chunk1->fast_retransmit == SCTP_NEED_FRTX)
+				chunk1->fast_retransmit = SCTP_DONT_FRTX;
 		}
 	}
 
@@ -1129,12 +1129,13 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
 	unsigned outstanding;
 	struct sctp_transport *primary = asoc->peer.primary_path;
 	int count_of_newacks = 0;
+	int gap_ack_blocks;
 
 	/* Grab the association's destination address list. */
 	transport_list = &asoc->peer.transport_addr_list;
 
 	sack_ctsn = ntohl(sack->cum_tsn_ack);
-
+	gap_ack_blocks = ntohs(sack->num_gap_ack_blocks);
 	/*
 	 * SFR-CACC algorithm:
 	 * On receipt of a SACK the sender SHOULD execute the
@@ -1144,35 +1145,38 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
 	 * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be
 	 * cleared. The CYCLING_CHANGEOVER flag SHOULD also be cleared for
 	 * all destinations.
-	 */
-	if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) {
-		primary->cacc.changeover_active = 0;
-		list_for_each_entry(transport, transport_list,
-				transports) {
-			transport->cacc.cycling_changeover = 0;
-		}
-	}
-
-	/*
-	 * SFR-CACC algorithm:
 	 * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE
 	 * is set the receiver of the SACK MUST take the following actions:
 	 *
 	 * A) Initialize the cacc_saw_newack to 0 for all destination
 	 * addresses.
+	 *
+	 * Only bother if changeover_active is set. Otherwise, this is
+	 * totally suboptimal to do on every SACK.
 	 */
-	if (sack->num_gap_ack_blocks &&
-	    primary->cacc.changeover_active) {
-		list_for_each_entry(transport, transport_list, transports) {
-			transport->cacc.cacc_saw_newack = 0;
+	if (primary->cacc.changeover_active) {
+		u8 clear_cycling = 0;
+
+		if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) {
+			primary->cacc.changeover_active = 0;
+			clear_cycling = 1;
+		}
+
+		if (clear_cycling || gap_ack_blocks) {
+			list_for_each_entry(transport, transport_list,
+					transports) {
+				if (clear_cycling)
+					transport->cacc.cycling_changeover = 0;
+				if (gap_ack_blocks)
+					transport->cacc.cacc_saw_newack = 0;
+			}
 		}
 	}
 
 	/* Get the highest TSN in the sack. */
 	highest_tsn = sack_ctsn;
-	if (sack->num_gap_ack_blocks)
-		highest_tsn +=
-		    ntohs(frags[ntohs(sack->num_gap_ack_blocks) - 1].gab.end);
+	if (gap_ack_blocks)
+		highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end);
 
 	if (TSN_lt(asoc->highest_sacked, highest_tsn)) {
 		highest_new_tsn = highest_tsn;
@@ -1181,11 +1185,11 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
 		highest_new_tsn = sctp_highest_new_tsn(sack, asoc);
 	}
 
+
 	/* Run through the retransmit queue. Credit bytes received
 	 * and free those chunks that we can.
 	 */
 	sctp_check_transmitted(q, &q->retransmit, NULL, sack, highest_new_tsn);
-	sctp_mark_missing(q, &q->retransmit, NULL, highest_new_tsn, 0);
 
 	/* Run through the transmitted queue.
 	 * Credit bytes received and free those chunks which we can.
@@ -1204,9 +1208,10 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
 			count_of_newacks ++;
 	}
 
-	list_for_each_entry(transport, transport_list, transports) {
-		sctp_mark_missing(q, &transport->transmitted, transport,
-				  highest_new_tsn, count_of_newacks);
+	if (gap_ack_blocks) {
+		list_for_each_entry(transport, transport_list, transports)
+			sctp_mark_missing(q, &transport->transmitted, transport,
+					  highest_new_tsn, count_of_newacks);
 	}
 
 	/* Move the Cumulative TSN Ack Point if appropriate. */
@@ -1651,7 +1656,7 @@ static void sctp_mark_missing(struct sctp_outq *q,
 		 * chunk if it has NOT been fast retransmitted or marked for
 		 * fast retransmit already.
 		 */
-		if (!chunk->fast_retransmit &&
+		if (chunk->fast_retransmit == SCTP_CAN_FRTX &&
 		    !chunk->tsn_gap_acked &&
 		    TSN_lt(tsn, highest_new_tsn_in_sack)) {
 
@@ -1676,7 +1681,7 @@ static void sctp_mark_missing(struct sctp_outq *q,
 		 */
 
 		if (chunk->tsn_missing_report >= 3) {
-			chunk->fast_retransmit = 1;
+			chunk->fast_retransmit = SCTP_NEED_FRTX;
 			do_fast_retransmit = 1;
 		}
 	}
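
Taken together, the last two hunks and the sctp_outq_flush_rtx() changes implement a small state machine: a chunk that is still SCTP_CAN_FRTX and has accumulated three missing reports is promoted to SCTP_NEED_FRTX, and retransmitting it demotes it to SCTP_DONT_FRTX so it is never fast-retransmitted twice. Below is a standalone toy sketch of that rule in ordinary userspace C; the toy_ names and simplified types are invented for illustration and are not the kernel structures or functions:

/* Toy model of the fast-retransmit state transitions in this patch. */
#include <stdio.h>

enum { SCTP_CAN_FRTX, SCTP_NEED_FRTX, SCTP_DONT_FRTX };

struct toy_chunk {
	int tsn_missing_report;
	int fast_retransmit;
};

/* Simplified version of the marking rule: only chunks still in CAN_FRTX
 * accumulate missing reports, and three reports promote to NEED_FRTX.
 */
static int toy_mark_missing(struct toy_chunk *c)
{
	if (c->fast_retransmit == SCTP_CAN_FRTX &&
	    ++c->tsn_missing_report >= 3) {
		c->fast_retransmit = SCTP_NEED_FRTX;
		return 1;	/* caller should fast-retransmit now */
	}
	return 0;
}

/* Simplified version of the flush rule: once retransmitted, the chunk
 * becomes ineligible for any further fast retransmit.
 */
static void toy_flush_rtx(struct toy_chunk *c)
{
	if (c->fast_retransmit == SCTP_NEED_FRTX)
		c->fast_retransmit = SCTP_DONT_FRTX;
}

int main(void)
{
	struct toy_chunk c = { 0, SCTP_CAN_FRTX };
	int i;

	for (i = 0; i < 5; i++)
		if (toy_mark_missing(&c))
			toy_flush_rtx(&c);

	/* Prints "reports=3 state=2": the chunk settles in DONT_FRTX. */
	printf("reports=%d state=%d\n", c.tsn_missing_report, c.fast_retransmit);
	return 0;
}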