aboutsummaryrefslogtreecommitdiffstats
path: root/net/sctp/outqueue.c
diff options
context:
space:
mode:
authorMarcelo Ricardo Leitner <marcelo.leitner@gmail.com>2017-10-03 18:20:13 -0400
committerDavid S. Miller <davem@davemloft.net>2017-10-03 19:27:29 -0400
commit5bbbbe32a43199c2b9ea5ea66fab6241c64beb51 (patch)
tree689b8896bcbcc50cfbc4554d79577cec95bb39f2 /net/sctp/outqueue.c
parent2fc019f790312e703efa1a44204c586112a430dc (diff)
sctp: introduce stream scheduler foundations
This patch introduces the hooks necessary to do stream scheduling, as per RFC Draft ndata. It also introduces the first scheduler, which is what we do today but now factored out: first come first served (FCFS). With stream scheduling now we have to track which chunk was enqueued on which stream and be able to select a chunk other than the one at the front of the main outqueue. So we introduce a list on sctp_stream_out_ext structure for this purpose. We reuse sctp_chunk->transmitted_list space for the list above, as the chunk cannot belong to the two lists at the same time. By using the union in there, we can have distinct names for these moments. sctp_sched_ops are the operations expected to be implemented by each scheduler. The dequeueing is a bit particular to this implementation but it is to match how we dequeue packets today. We first dequeue and then check if it fits the packet and if not, we requeue it at head. That is why we don't have a peek operation but have dequeue_done instead, which is called once the chunk can be safely considered as transmitted. The check removed from sctp_outq_flush is now performed by sctp_stream_outq_migrate, which is only called during assoc setup. (sctp_sendmsg() also checks for it) The only operation that is foreseen but not yet added here is a way to signal that a new packet is starting or that the packet is done, for the round robin per-packet scheduler, but it is intentionally left to the patch that actually implements it. Support for I-DATA chunks, also described in this RFC, with user message interleaving is straightforward as it just requires the schedulers to probe for the feature and ignore datamsg boundaries when dequeueing. See-also: https://tools.ietf.org/html/draft-ietf-tsvwg-sctp-ndata-13 Tested-by: Xin Long <lucien.xin@gmail.com> Signed-off-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sctp/outqueue.c')
-rw-r--r--net/sctp/outqueue.c59
1 file changed, 32 insertions, 27 deletions
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 746b07b7937d..4db012aa25f7 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -50,6 +50,7 @@
50 50
51#include <net/sctp/sctp.h> 51#include <net/sctp/sctp.h>
52#include <net/sctp/sm.h> 52#include <net/sctp/sm.h>
53#include <net/sctp/stream_sched.h>
53 54
54/* Declare internal functions here. */ 55/* Declare internal functions here. */
55static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn); 56static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
@@ -72,32 +73,38 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp);
72 73
73/* Add data to the front of the queue. */ 74/* Add data to the front of the queue. */
74static inline void sctp_outq_head_data(struct sctp_outq *q, 75static inline void sctp_outq_head_data(struct sctp_outq *q,
75 struct sctp_chunk *ch) 76 struct sctp_chunk *ch)
76{ 77{
78 struct sctp_stream_out_ext *oute;
79 __u16 stream;
80
77 list_add(&ch->list, &q->out_chunk_list); 81 list_add(&ch->list, &q->out_chunk_list);
78 q->out_qlen += ch->skb->len; 82 q->out_qlen += ch->skb->len;
83
84 stream = sctp_chunk_stream_no(ch);
85 oute = q->asoc->stream.out[stream].ext;
86 list_add(&ch->stream_list, &oute->outq);
79} 87}
80 88
81/* Take data from the front of the queue. */ 89/* Take data from the front of the queue. */
82static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q) 90static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
83{ 91{
84 struct sctp_chunk *ch = NULL; 92 return q->sched->dequeue(q);
85
86 if (!list_empty(&q->out_chunk_list)) {
87 struct list_head *entry = q->out_chunk_list.next;
88
89 ch = list_entry(entry, struct sctp_chunk, list);
90 list_del_init(entry);
91 q->out_qlen -= ch->skb->len;
92 }
93 return ch;
94} 93}
94
95/* Add data chunk to the end of the queue. */ 95/* Add data chunk to the end of the queue. */
96static inline void sctp_outq_tail_data(struct sctp_outq *q, 96static inline void sctp_outq_tail_data(struct sctp_outq *q,
97 struct sctp_chunk *ch) 97 struct sctp_chunk *ch)
98{ 98{
99 struct sctp_stream_out_ext *oute;
100 __u16 stream;
101
99 list_add_tail(&ch->list, &q->out_chunk_list); 102 list_add_tail(&ch->list, &q->out_chunk_list);
100 q->out_qlen += ch->skb->len; 103 q->out_qlen += ch->skb->len;
104
105 stream = sctp_chunk_stream_no(ch);
106 oute = q->asoc->stream.out[stream].ext;
107 list_add_tail(&ch->stream_list, &oute->outq);
101} 108}
102 109
103/* 110/*
@@ -207,6 +214,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
207 INIT_LIST_HEAD(&q->retransmit); 214 INIT_LIST_HEAD(&q->retransmit);
208 INIT_LIST_HEAD(&q->sacked); 215 INIT_LIST_HEAD(&q->sacked);
209 INIT_LIST_HEAD(&q->abandoned); 216 INIT_LIST_HEAD(&q->abandoned);
217 sctp_sched_set_sched(asoc, SCTP_SS_FCFS);
210} 218}
211 219
212/* Free the outqueue structure and any related pending chunks. 220/* Free the outqueue structure and any related pending chunks.
@@ -258,6 +266,7 @@ static void __sctp_outq_teardown(struct sctp_outq *q)
258 266
259 /* Throw away any leftover data chunks. */ 267 /* Throw away any leftover data chunks. */
260 while ((chunk = sctp_outq_dequeue_data(q)) != NULL) { 268 while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
269 sctp_sched_dequeue_done(q, chunk);
261 270
262 /* Mark as send failure. */ 271 /* Mark as send failure. */
263 sctp_chunk_fail(chunk, q->error); 272 sctp_chunk_fail(chunk, q->error);
@@ -391,13 +400,14 @@ static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
391 struct sctp_outq *q = &asoc->outqueue; 400 struct sctp_outq *q = &asoc->outqueue;
392 struct sctp_chunk *chk, *temp; 401 struct sctp_chunk *chk, *temp;
393 402
403 q->sched->unsched_all(&asoc->stream);
404
394 list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) { 405 list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) {
395 if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) || 406 if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
396 chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive) 407 chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
397 continue; 408 continue;
398 409
399 list_del_init(&chk->list); 410 sctp_sched_dequeue_common(q, chk);
400 q->out_qlen -= chk->skb->len;
401 asoc->sent_cnt_removable--; 411 asoc->sent_cnt_removable--;
402 asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++; 412 asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
403 if (chk->sinfo.sinfo_stream < asoc->stream.outcnt) { 413 if (chk->sinfo.sinfo_stream < asoc->stream.outcnt) {
@@ -415,6 +425,8 @@ static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
415 break; 425 break;
416 } 426 }
417 427
428 q->sched->sched_all(&asoc->stream);
429
418 return msg_len; 430 return msg_len;
419} 431}
420 432
@@ -1033,22 +1045,9 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
1033 while ((chunk = sctp_outq_dequeue_data(q)) != NULL) { 1045 while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
1034 __u32 sid = ntohs(chunk->subh.data_hdr->stream); 1046 __u32 sid = ntohs(chunk->subh.data_hdr->stream);
1035 1047
1036 /* RFC 2960 6.5 Every DATA chunk MUST carry a valid
1037 * stream identifier.
1038 */
1039 if (chunk->sinfo.sinfo_stream >= asoc->stream.outcnt) {
1040
1041 /* Mark as failed send. */
1042 sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
1043 if (asoc->peer.prsctp_capable &&
1044 SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
1045 asoc->sent_cnt_removable--;
1046 sctp_chunk_free(chunk);
1047 continue;
1048 }
1049
1050 /* Has this chunk expired? */ 1048 /* Has this chunk expired? */
1051 if (sctp_chunk_abandoned(chunk)) { 1049 if (sctp_chunk_abandoned(chunk)) {
1050 sctp_sched_dequeue_done(q, chunk);
1052 sctp_chunk_fail(chunk, 0); 1051 sctp_chunk_fail(chunk, 0);
1053 sctp_chunk_free(chunk); 1052 sctp_chunk_free(chunk);
1054 continue; 1053 continue;
@@ -1070,6 +1069,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
1070 new_transport = asoc->peer.active_path; 1069 new_transport = asoc->peer.active_path;
1071 if (new_transport->state == SCTP_UNCONFIRMED) { 1070 if (new_transport->state == SCTP_UNCONFIRMED) {
1072 WARN_ONCE(1, "Attempt to send packet on unconfirmed path."); 1071 WARN_ONCE(1, "Attempt to send packet on unconfirmed path.");
1072 sctp_sched_dequeue_done(q, chunk);
1073 sctp_chunk_fail(chunk, 0); 1073 sctp_chunk_fail(chunk, 0);
1074 sctp_chunk_free(chunk); 1074 sctp_chunk_free(chunk);
1075 continue; 1075 continue;
@@ -1133,6 +1133,11 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
1133 else 1133 else
1134 asoc->stats.oodchunks++; 1134 asoc->stats.oodchunks++;
1135 1135
1136 /* Only now it's safe to consider this
1137 * chunk as sent, sched-wise.
1138 */
1139 sctp_sched_dequeue_done(q, chunk);
1140
1136 break; 1141 break;
1137 1142
1138 default: 1143 default: