about | summary | refs | log | tree | commit | diff | stats
path: root/net/sctp/outqueue.c
diff options
context:
space:
mode:
authorMarcelo Ricardo Leitner <marcelo.leitner@gmail.com>2018-05-14 13:34:40 -0400
committerDavid S. Miller <davem@davemloft.net>2018-05-14 22:57:15 -0400
commitcb93cc5d06d9b1016326376a4980d11a9040afd2 (patch)
tree796f4d5017039d9ca841ec2fb3ba817c2044ff03 /net/sctp/outqueue.c
parent96e0418e812e9c30211b55ffcddc5f03bfd96919 (diff)
sctp: move flushing of data chunks out of sctp_outq_flush
Move the flushing of data chunks out to the new sctp_outq_flush_data. Again, this gives smaller functions with well-defined objectives.

Signed-off-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sctp/outqueue.c')
-rw-r--r--net/sctp/outqueue.c149
1 file changed, 75 insertions, 74 deletions
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 49e80bf2ade7..bfa2e43dfd31 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -1038,46 +1038,17 @@ static bool sctp_outq_flush_rtx(struct sctp_outq *q,
1038 1038
1039 return true; 1039 return true;
1040} 1040}
1041/* 1041
1042 * Try to flush an outqueue. 1042static void sctp_outq_flush_data(struct sctp_outq *q,
1043 * 1043 struct sctp_transport **_transport,
1044 * Description: Send everything in q which we legally can, subject to 1044 struct list_head *transport_list,
1045 * congestion limitations. 1045 int rtx_timeout, gfp_t gfp)
1046 * * Note: This function can be called from multiple contexts so appropriate
1047 * locking concerns must be made. Today we use the sock lock to protect
1048 * this function.
1049 */
1050static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
1051{ 1046{
1052 struct sctp_packet *packet; 1047 struct sctp_transport *transport = *_transport;
1048 struct sctp_packet *packet = transport ? &transport->packet : NULL;
1053 struct sctp_association *asoc = q->asoc; 1049 struct sctp_association *asoc = q->asoc;
1054 struct sctp_transport *transport = NULL;
1055 struct sctp_chunk *chunk; 1050 struct sctp_chunk *chunk;
1056 enum sctp_xmit status; 1051 enum sctp_xmit status;
1057 int error = 0;
1058
1059 /* These transports have chunks to send. */
1060 struct list_head transport_list;
1061 struct list_head *ltransport;
1062
1063 INIT_LIST_HEAD(&transport_list);
1064 packet = NULL;
1065
1066 /*
1067 * 6.10 Bundling
1068 * ...
1069 * When bundling control chunks with DATA chunks, an
1070 * endpoint MUST place control chunks first in the outbound
1071 * SCTP packet. The transmitter MUST transmit DATA chunks
1072 * within a SCTP packet in increasing order of TSN.
1073 * ...
1074 */
1075
1076 sctp_outq_flush_ctrl(q, &transport, &transport_list, gfp);
1077 packet = &transport->packet;
1078
1079 if (q->asoc->src_out_of_asoc_ok)
1080 goto sctp_flush_out;
1081 1052
1082 /* Is it OK to send data chunks? */ 1053 /* Is it OK to send data chunks? */
1083 switch (asoc->state) { 1054 switch (asoc->state) {
@@ -1102,10 +1073,11 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
1102 * current cwnd). 1073 * current cwnd).
1103 */ 1074 */
1104 if (!list_empty(&q->retransmit)) { 1075 if (!list_empty(&q->retransmit)) {
1105 if (!sctp_outq_flush_rtx(q, &transport, &transport_list, 1076 if (!sctp_outq_flush_rtx(q, _transport, transport_list,
1106 rtx_timeout)) 1077 rtx_timeout))
1107 break; 1078 break;
1108 /* We may have switched current transport */ 1079 /* We may have switched current transport */
1080 transport = *_transport;
1109 packet = &transport->packet; 1081 packet = &transport->packet;
1110 } 1082 }
1111 1083
@@ -1131,12 +1103,14 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
1131 1103
1132 if (asoc->stream.out[sid].state == SCTP_STREAM_CLOSED) { 1104 if (asoc->stream.out[sid].state == SCTP_STREAM_CLOSED) {
1133 sctp_outq_head_data(q, chunk); 1105 sctp_outq_head_data(q, chunk);
1134 goto sctp_flush_out; 1106 break;
1135 } 1107 }
1136 1108
1137 if (sctp_outq_select_transport(chunk, asoc, &transport, 1109 if (sctp_outq_select_transport(chunk, asoc, _transport,
1138 &transport_list)) 1110 transport_list)) {
1111 transport = *_transport;
1139 packet = &transport->packet; 1112 packet = &transport->packet;
1113 }
1140 1114
1141 pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p " 1115 pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p "
1142 "skb->users:%d\n", 1116 "skb->users:%d\n",
@@ -1148,8 +1122,10 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
1148 1122
1149 /* Add the chunk to the packet. */ 1123 /* Add the chunk to the packet. */
1150 status = sctp_packet_transmit_chunk(packet, chunk, 0, gfp); 1124 status = sctp_packet_transmit_chunk(packet, chunk, 0, gfp);
1151
1152 switch (status) { 1125 switch (status) {
1126 case SCTP_XMIT_OK:
1127 break;
1128
1153 case SCTP_XMIT_PMTU_FULL: 1129 case SCTP_XMIT_PMTU_FULL:
1154 case SCTP_XMIT_RWND_FULL: 1130 case SCTP_XMIT_RWND_FULL:
1155 case SCTP_XMIT_DELAY: 1131 case SCTP_XMIT_DELAY:
@@ -1161,41 +1137,25 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
1161 status); 1137 status);
1162 1138
1163 sctp_outq_head_data(q, chunk); 1139 sctp_outq_head_data(q, chunk);
1164 goto sctp_flush_out; 1140 return;
1165
1166 case SCTP_XMIT_OK:
1167 /* The sender is in the SHUTDOWN-PENDING state,
1168 * The sender MAY set the I-bit in the DATA
1169 * chunk header.
1170 */
1171 if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
1172 chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;
1173 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
1174 asoc->stats.ouodchunks++;
1175 else
1176 asoc->stats.oodchunks++;
1177
1178 /* Only now it's safe to consider this
1179 * chunk as sent, sched-wise.
1180 */
1181 sctp_sched_dequeue_done(q, chunk);
1182
1183 break;
1184
1185 default:
1186 BUG();
1187 } 1141 }
1188 1142
1189 /* BUG: We assume that the sctp_packet_transmit() 1143 /* The sender is in the SHUTDOWN-PENDING state,
1190 * call below will succeed all the time and add the 1144 * The sender MAY set the I-bit in the DATA
1191 * chunk to the transmitted list and restart the 1145 * chunk header.
1192 * timers.
1193 * It is possible that the call can fail under OOM
1194 * conditions.
1195 *
1196 * Is this really a problem? Won't this behave
1197 * like a lost TSN?
1198 */ 1146 */
1147 if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
1148 chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;
1149 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
1150 asoc->stats.ouodchunks++;
1151 else
1152 asoc->stats.oodchunks++;
1153
1154 /* Only now it's safe to consider this
1155 * chunk as sent, sched-wise.
1156 */
1157 sctp_sched_dequeue_done(q, chunk);
1158
1199 list_add_tail(&chunk->transmitted_list, 1159 list_add_tail(&chunk->transmitted_list,
1200 &transport->transmitted); 1160 &transport->transmitted);
1201 1161
@@ -1206,7 +1166,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
1206 * COOKIE-ECHO chunk. 1166 * COOKIE-ECHO chunk.
1207 */ 1167 */
1208 if (packet->has_cookie_echo) 1168 if (packet->has_cookie_echo)
1209 goto sctp_flush_out; 1169 break;
1210 } 1170 }
1211 break; 1171 break;
1212 1172
@@ -1214,6 +1174,47 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
1214 /* Do nothing. */ 1174 /* Do nothing. */
1215 break; 1175 break;
1216 } 1176 }
1177}
1178
1179/*
1180 * Try to flush an outqueue.
1181 *
1182 * Description: Send everything in q which we legally can, subject to
1183 * congestion limitations.
1184 * * Note: This function can be called from multiple contexts so appropriate
1185 * locking concerns must be made. Today we use the sock lock to protect
1186 * this function.
1187 */
1188static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
1189{
1190 struct sctp_packet *packet;
1191 struct sctp_association *asoc = q->asoc;
1192 struct sctp_transport *transport = NULL;
1193 int error = 0;
1194
1195 /* These transports have chunks to send. */
1196 struct list_head transport_list;
1197 struct list_head *ltransport;
1198
1199 INIT_LIST_HEAD(&transport_list);
1200 packet = NULL;
1201
1202 /*
1203 * 6.10 Bundling
1204 * ...
1205 * When bundling control chunks with DATA chunks, an
1206 * endpoint MUST place control chunks first in the outbound
1207 * SCTP packet. The transmitter MUST transmit DATA chunks
1208 * within a SCTP packet in increasing order of TSN.
1209 * ...
1210 */
1211
1212 sctp_outq_flush_ctrl(q, &transport, &transport_list, gfp);
1213
1214 if (q->asoc->src_out_of_asoc_ok)
1215 goto sctp_flush_out;
1216
1217 sctp_outq_flush_data(q, &transport, &transport_list, rtx_timeout, gfp);
1217 1218
1218sctp_flush_out: 1219sctp_flush_out:
1219 1220