author     Xin Long <lucien.xin@gmail.com>        2017-12-08 08:04:08 -0500
committer  David S. Miller <davem@davemloft.net>  2017-12-11 11:23:05 -0500
commit     65f5e357839e40817aead853d7a7f61ff828b52b (patch)
tree       3c823997dee2e3f22f5480f82c92c0f185ee9a2a
parent     be4e0ce10dc64b9a8aae42ec3dbd906022f91ec5 (diff)
sctp: implement abort_pd for sctp_stream_interleave
abort_pd is added as a member of sctp_stream_interleave, used to abort
partial delivery for data or idata, and is called in sctp_cmd_assoc_failed.

Since stream interleave allows partial delivery on each stream at the
same time, sctp_intl_abort_pd for idata is quite different from the old
function sctp_ulpq_abort_pd for data.

Note that sctp_ulpevent_make_pdapi gains per-stream support in this
patch by adding pdapi_stream and pdapi_seq to sctp_pdapi_event, as
described in section 6.1.7 of RFC 6458.

Signed-off-by: Xin Long <lucien.xin@gmail.com>
Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
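For context (not part of the patch): the new pdapi_stream and pdapi_seq fields are what a
userspace receiver sees when a per-stream partial delivery is aborted. Below is a minimal
sketch of how an application might subscribe to and read the extended
SCTP_PARTIAL_DELIVERY_EVENT, assuming headers that already carry this change; the socket
setup, buffer size, and the helper names subscribe_pdapi/read_one are illustrative only.

#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Subscribe to partial delivery notifications on an already-created
 * SCTP socket 'fd' (socket/bind/connect setup not shown).
 */
static int subscribe_pdapi(int fd)
{
        struct sctp_event_subscribe ev = { .sctp_partial_delivery_event = 1 };

        return setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS, &ev, sizeof(ev));
}

/* Read one message; notifications arrive in-band with MSG_NOTIFICATION
 * set in msg_flags.
 */
static void read_one(int fd)
{
        union {                         /* keeps the buffer aligned for parsing */
                union sctp_notification sn;
                char buf[8192];
        } m;
        struct iovec iov = { .iov_base = m.buf, .iov_len = sizeof(m.buf) };
        struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
        ssize_t n = recvmsg(fd, &msg, 0);

        if (n <= 0)
                return;

        if (!(msg.msg_flags & MSG_NOTIFICATION))
                return;                 /* ordinary user data, not shown here */

        if (m.sn.sn_header.sn_type == SCTP_PARTIAL_DELIVERY_EVENT &&
            m.sn.sn_pdapi_event.pdapi_indication == SCTP_PARTIAL_DELIVERY_ABORTED)
                /* The new fields identify which stream's partially delivered
                 * message was dropped (RFC 6458, section 6.1.7).
                 */
                fprintf(stderr, "partial delivery aborted: stream %u seq %u\n",
                        m.sn.sn_pdapi_event.pdapi_stream,
                        m.sn.sn_pdapi_event.pdapi_seq);
}

On the non-interleaved data path the event is still delivered, but with pdapi_stream and
pdapi_seq reported as 0, as can be seen in the sctp_ulpq_abort_pd hunk at the end of this
patch.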
-rw-r--r--   include/net/sctp/stream_interleave.h    1
-rw-r--r--   include/net/sctp/ulpevent.h              3
-rw-r--r--   include/uapi/linux/sctp.h                2
-rw-r--r--   net/sctp/sm_sideeffect.c                 2
-rw-r--r--   net/sctp/stream_interleave.c            99
-rw-r--r--   net/sctp/ulpevent.c                      9
-rw-r--r--   net/sctp/ulpqueue.c                      2
7 files changed, 112 insertions, 6 deletions
diff --git a/include/net/sctp/stream_interleave.h b/include/net/sctp/stream_interleave.h
index 317d9b3a5299..501b2be049a3 100644
--- a/include/net/sctp/stream_interleave.h
+++ b/include/net/sctp/stream_interleave.h
@@ -46,6 +46,7 @@ struct sctp_stream_interleave {
         void    (*renege_events)(struct sctp_ulpq *ulpq,
                                  struct sctp_chunk *chunk, gfp_t gfp);
         void    (*start_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
+        void    (*abort_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
 };
 
 void sctp_stream_interleave_init(struct sctp_stream *stream);
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
index ce4f2aa35d56..51b4e0626c34 100644
--- a/include/net/sctp/ulpevent.h
+++ b/include/net/sctp/ulpevent.h
@@ -122,7 +122,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event(
 
 struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
                 const struct sctp_association *asoc,
-                __u32 indication, gfp_t gfp);
+                __u32 indication, __u32 sid, __u32 seq,
+                __u32 flags, gfp_t gfp);
 
 struct sctp_ulpevent *sctp_ulpevent_make_adaptation_indication(
         const struct sctp_association *asoc, gfp_t gfp);
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index 6ed934c65a5f..4c4db14786bd 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -460,6 +460,8 @@ struct sctp_pdapi_event {
         __u32 pdapi_length;
         __u32 pdapi_indication;
         sctp_assoc_t pdapi_assoc_id;
+        __u32 pdapi_stream;
+        __u32 pdapi_seq;
 };
 
 enum { SCTP_PARTIAL_DELIVERY_ABORTED=0, };
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 36710549a4ca..8adde71fdb31 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -632,7 +632,7 @@ static void sctp_cmd_assoc_failed(struct sctp_cmd_seq *commands,
         struct sctp_chunk *abort;
 
         /* Cancel any partial delivery in progress. */
-        sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
+        asoc->stream.si->abort_pd(&asoc->ulpq, GFP_ATOMIC);
 
         if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT)
                 event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
diff --git a/net/sctp/stream_interleave.c b/net/sctp/stream_interleave.c
index 4dce8d33c5ab..d15645ea338b 100644
--- a/net/sctp/stream_interleave.c
+++ b/net/sctp/stream_interleave.c
@@ -652,6 +652,103 @@ static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
         sk_mem_reclaim(asoc->base.sk);
 }
 
+static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid,
+                                      __u32 mid, __u16 flags, gfp_t gfp)
+{
+        struct sock *sk = ulpq->asoc->base.sk;
+        struct sctp_ulpevent *ev = NULL;
+
+        if (!sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
+                                        &sctp_sk(sk)->subscribe))
+                return;
+
+        ev = sctp_ulpevent_make_pdapi(ulpq->asoc, SCTP_PARTIAL_DELIVERY_ABORTED,
+                                      sid, mid, flags, gfp);
+        if (ev) {
+                __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
+
+                if (!sctp_sk(sk)->data_ready_signalled) {
+                        sctp_sk(sk)->data_ready_signalled = 1;
+                        sk->sk_data_ready(sk);
+                }
+        }
+}
+
+static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
+{
+        struct sctp_stream *stream = &ulpq->asoc->stream;
+        struct sctp_ulpevent *cevent, *event = NULL;
+        struct sk_buff_head *lobby = &ulpq->lobby;
+        struct sk_buff *pos, *tmp;
+        struct sk_buff_head temp;
+        __u16 csid;
+        __u32 cmid;
+
+        skb_queue_head_init(&temp);
+        sctp_skb_for_each(pos, lobby, tmp) {
+                cevent = (struct sctp_ulpevent *)pos->cb;
+                csid = cevent->stream;
+                cmid = cevent->mid;
+
+                if (csid > sid)
+                        break;
+
+                if (csid < sid)
+                        continue;
+
+                if (!MID_lt(cmid, sctp_mid_peek(stream, in, csid)))
+                        break;
+
+                __skb_unlink(pos, lobby);
+                if (!event)
+                        event = sctp_skb2event(pos);
+
+                __skb_queue_tail(&temp, pos);
+        }
+
+        if (!event && pos != (struct sk_buff *)lobby) {
+                cevent = (struct sctp_ulpevent *)pos->cb;
+                csid = cevent->stream;
+                cmid = cevent->mid;
+
+                if (csid == sid && cmid == sctp_mid_peek(stream, in, csid)) {
+                        sctp_mid_next(stream, in, csid);
+                        __skb_unlink(pos, lobby);
+                        __skb_queue_tail(&temp, pos);
+                        event = sctp_skb2event(pos);
+                }
+        }
+
+        if (event) {
+                sctp_intl_retrieve_ordered(ulpq, event);
+                sctp_enqueue_event(ulpq, event);
+        }
+}
+
+static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
+{
+        struct sctp_stream *stream = &ulpq->asoc->stream;
+        __u16 sid;
+
+        for (sid = 0; sid < stream->incnt; sid++) {
+                struct sctp_stream_in *sin = &stream->in[sid];
+                __u32 mid;
+
+                if (sin->pd_mode) {
+                        sin->pd_mode = 0;
+
+                        mid = sin->mid;
+                        sctp_intl_stream_abort_pd(ulpq, sid, mid, 0, gfp);
+                        sctp_mid_skip(stream, in, sid, mid);
+
+                        sctp_intl_reap_ordered(ulpq, sid);
+                }
+        }
+
+        /* intl abort pd happens only when all data needs to be cleaned */
+        sctp_ulpq_flush(ulpq);
+}
+
 static struct sctp_stream_interleave sctp_stream_interleave_0 = {
         .data_chunk_len = sizeof(struct sctp_data_chunk),
         /* DATA process functions */
@@ -662,6 +759,7 @@ static struct sctp_stream_interleave sctp_stream_interleave_0 = {
         .enqueue_event = sctp_ulpq_tail_event,
         .renege_events = sctp_ulpq_renege,
         .start_pd = sctp_ulpq_partial_delivery,
+        .abort_pd = sctp_ulpq_abort_pd,
 };
 
 static struct sctp_stream_interleave sctp_stream_interleave_1 = {
@@ -674,6 +772,7 @@ static struct sctp_stream_interleave sctp_stream_interleave_1 = {
         .enqueue_event = sctp_enqueue_event,
         .renege_events = sctp_renege_events,
         .start_pd = sctp_intl_start_pd,
+        .abort_pd = sctp_intl_abort_pd,
 };
 
 void sctp_stream_interleave_init(struct sctp_stream *stream)
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index d3218f3e9cf7..84207ad33e8e 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -730,8 +730,9 @@ fail:
  * various events.
  */
 struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
-                const struct sctp_association *asoc, __u32 indication,
-                gfp_t gfp)
+                const struct sctp_association *asoc,
+                __u32 indication, __u32 sid, __u32 seq,
+                __u32 flags, gfp_t gfp)
 {
         struct sctp_ulpevent *event;
         struct sctp_pdapi_event *pd;
@@ -752,7 +753,9 @@ struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
          * Currently unused.
          */
         pd->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
-        pd->pdapi_flags = 0;
+        pd->pdapi_flags = flags;
+        pd->pdapi_stream = sid;
+        pd->pdapi_seq = seq;
 
         /* pdapi_length: 32 bits (unsigned integer)
          *
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 76ec5149a093..dd53daab4a25 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -1144,7 +1144,7 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
                                         &sctp_sk(sk)->subscribe))
                 ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
                                               SCTP_PARTIAL_DELIVERY_ABORTED,
-                                              gfp);
+                                              0, 0, 0, gfp);
         if (ev)
                 __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
 