author	Xin Long <lucien.xin@gmail.com>	2017-12-08 08:04:09 -0500
committer	David S. Miller <davem@davemloft.net>	2017-12-11 11:23:05 -0500
commit	132282386f5d0eff7a84a119599216b5f9e9bfc6 (patch)
tree	d38678a52bc8be07f40964b91c6afb839fa8cf03
parent	65f5e357839e40817aead853d7a7f61ff828b52b (diff)
sctp: add support for the process of unordered idata
Processing unordered idata is more complicated than unordered data:

- A mid counter has to be added to sctp_stream_out to hold the next MID
  value for unordered messages, kept separate from the ordered idata's
  counter.
- To support partial delivery (pd) for unordered idata, another mid and
  pd_mode are added to sctp_stream_in to save the message id and pd state.
- To make unordered idata reassembly easier, a new event queue is added
  to hold the frags for idata.

The patch mostly adds reassembly functions for unordered idata similar to
the ordered idata ones, and also adjusts some other code in assign_mid,
abort_pd and ulpevent_data for idata.

Signed-off-by: Xin Long <lucien.xin@gmail.com>
Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
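For context, the sender-side rule added to sctp_chunk_assign_mid() in the diff below is that every fragment of an unordered message carries the same MID, and the per-stream unordered counter only advances once the last fragment has been stamped (sctp_mid_uo_peek()/sctp_mid_uo_next()). The following user-space sketch only illustrates that counter behaviour; it is not kernel code, and the names in it (struct frag, assign_mid_uo, stream_mid_uo) are made up for the example.

/*
 * Minimal standalone sketch (not kernel code) of the MID assignment rule
 * this patch applies to unordered I-DATA: all fragments of one message
 * carry the same MID, and the per-stream unordered counter only advances
 * when the last fragment is stamped, mirroring sctp_mid_uo_peek() and
 * sctp_mid_uo_next().  All identifiers below are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct frag {
	bool last;	/* is this the final fragment of the message? */
	uint32_t mid;	/* message identifier, shared by all fragments */
};

static void assign_mid_uo(struct frag *frags, int n, uint32_t *stream_mid_uo)
{
	for (int i = 0; i < n; i++)
		/* "peek" until the last fragment, then post-increment ("next") */
		frags[i].mid = frags[i].last ? (*stream_mid_uo)++ : *stream_mid_uo;
}

int main(void)
{
	struct frag msg[3] = { { false }, { false }, { true } };
	uint32_t mid_uo = 7;

	assign_mid_uo(msg, 3, &mid_uo);
	for (int i = 0; i < 3; i++)
		printf("frag %d: mid=%u\n", i, msg[i].mid);	/* all print mid=7 */
	printf("next unordered mid for this stream: %u\n", mid_uo);	/* 8 */
	return 0;
}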
-rw-r--r--	include/net/sctp/structs.h	14
-rw-r--r--	include/net/sctp/ulpqueue.h	1
-rw-r--r--	net/sctp/socket.c	23
-rw-r--r--	net/sctp/stream_interleave.c	377
-rw-r--r--	net/sctp/ulpqueue.c	5
5 files changed, 392 insertions, 28 deletions
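The new reassembly helpers in net/sctp/stream_interleave.c below keep the reasm_uo queue ordered by (stream, MID, FSN) and compare MIDs with MID_lt(). MID_lt() itself is defined outside this diff; the user-space sketch below shows the serial-number comparison such a helper is assumed to perform (mirroring TSN_lt()), so that MID ordering still works when the 32-bit counter wraps around. The name mid_lt here is illustrative.

/*
 * Assumed behaviour of an MID_lt()-style helper: "a is logically before b",
 * computed with serial-number arithmetic so it is robust against wraparound
 * of the 32-bit message identifier.  Not taken from this patch.
 */
#include <stdint.h>
#include <stdio.h>

static int mid_lt(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

int main(void)
{
	/* A plain "<" fails across the wrap; serial arithmetic does not. */
	printf("%d\n", mid_lt(1, 2));			/* 1: 1 precedes 2 */
	printf("%d\n", mid_lt(0xffffffffu, 0));		/* 1: just before the wrap */
	printf("%d\n", mid_lt(0, 0xffffffffu));		/* 0 */
	return 0;
}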
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 73b315de2fef..8ef638d966f1 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -413,6 +413,14 @@ void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new);
 
 #define sctp_stream_in(asoc, sid) (&(asoc)->stream.in[sid])
 
+/* What is the current MID_uo number for this stream? */
+#define sctp_mid_uo_peek(stream, type, sid)	\
+	((stream)->type[sid].mid_uo)
+
+/* Return the next MID_uo number for this stream. */
+#define sctp_mid_uo_next(stream, type, sid)	\
+	((stream)->type[sid].mid_uo++)
+
 /*
  * Pointers to address related SCTP functions.
  * (i.e. things that depend on the address family.)
@@ -1379,8 +1387,9 @@ struct sctp_stream_out {
 		__u32 mid;
 		__u16 ssn;
 	};
-	__u8 state;
+	__u32 mid_uo;
 	struct sctp_stream_out_ext *ext;
+	__u8 state;
 };
 
 struct sctp_stream_in {
@@ -1388,8 +1397,11 @@ struct sctp_stream_in {
 		__u32 mid;
 		__u16 ssn;
 	};
+	__u32 mid_uo;
 	__u32 fsn;
+	__u32 fsn_uo;
 	char pd_mode;
+	char pd_mode_uo;
 };
 
 struct sctp_stream {
diff --git a/include/net/sctp/ulpqueue.h b/include/net/sctp/ulpqueue.h
index eb98c7150a56..bb0ecba3db2b 100644
--- a/include/net/sctp/ulpqueue.h
+++ b/include/net/sctp/ulpqueue.h
@@ -45,6 +45,7 @@ struct sctp_ulpq {
 	char pd_mode;
 	struct sctp_association *asoc;
 	struct sk_buff_head reasm;
+	struct sk_buff_head reasm_uo;
 	struct sk_buff_head lobby;
 };
 
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index c58a1fc02978..7eec0a0b7f79 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -201,6 +201,22 @@ static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
 		cb(chunk);
 }
 
+static void sctp_for_each_rx_skb(struct sctp_association *asoc, struct sock *sk,
+				 void (*cb)(struct sk_buff *, struct sock *))
+
+{
+	struct sk_buff *skb, *tmp;
+
+	sctp_skb_for_each(skb, &asoc->ulpq.lobby, tmp)
+		cb(skb, sk);
+
+	sctp_skb_for_each(skb, &asoc->ulpq.reasm, tmp)
+		cb(skb, sk);
+
+	sctp_skb_for_each(skb, &asoc->ulpq.reasm_uo, tmp)
+		cb(skb, sk);
+}
+
 /* Verify that this is a valid address. */
 static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
 				   int len)
@@ -1554,6 +1570,7 @@ static void sctp_close(struct sock *sk, long timeout)
 
 	if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
 	    !skb_queue_empty(&asoc->ulpq.reasm) ||
+	    !skb_queue_empty(&asoc->ulpq.reasm_uo) ||
 	    (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
 		struct sctp_chunk *chunk;
 
@@ -8495,11 +8512,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 
 	}
 
-	sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp)
-		sctp_skb_set_owner_r_frag(skb, newsk);
-
-	sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp)
-		sctp_skb_set_owner_r_frag(skb, newsk);
+	sctp_for_each_rx_skb(assoc, newsk, sctp_skb_set_owner_r_frag);
 
 	/* Set the type of socket to indicate that it is peeled off from the
 	 * original UDP-style socket or created with the accept() call on a
diff --git a/net/sctp/stream_interleave.c b/net/sctp/stream_interleave.c
index d15645ea338b..87b9417c9892 100644
--- a/net/sctp/stream_interleave.c
+++ b/net/sctp/stream_interleave.c
@@ -74,12 +74,10 @@ static void sctp_chunk_assign_mid(struct sctp_chunk *chunk)
 
 	list_for_each_entry(lchunk, &chunk->msg->chunks, frag_list) {
 		struct sctp_idatahdr *hdr;
+		__u32 mid;
 
 		lchunk->has_mid = 1;
 
-		if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
-			continue;
-
 		hdr = lchunk->subh.idata_hdr;
 
 		if (lchunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG)
@@ -87,10 +85,16 @@ static void sctp_chunk_assign_mid(struct sctp_chunk *chunk)
 		else
 			hdr->fsn = htonl(cfsn++);
 
-		if (lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
-			hdr->mid = htonl(sctp_mid_next(stream, out, sid));
-		else
-			hdr->mid = htonl(sctp_mid_peek(stream, out, sid));
+		if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
+			mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
+				sctp_mid_uo_next(stream, out, sid) :
+				sctp_mid_uo_peek(stream, out, sid);
+		} else {
+			mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
+				sctp_mid_next(stream, out, sid) :
+				sctp_mid_peek(stream, out, sid);
+		}
+		hdr->mid = htonl(mid);
 	}
 }
 
@@ -449,9 +453,6 @@ static struct sctp_ulpevent *sctp_intl_order(struct sctp_ulpq *ulpq,
 	struct sctp_stream *stream;
 	__u16 sid;
 
-	if (event->msg_flags & SCTP_DATA_UNORDERED)
-		return event;
-
 	stream = &ulpq->asoc->stream;
 	sid = event->stream;
 
@@ -512,6 +513,317 @@ out_free:
 	return 0;
 }
 
+static void sctp_intl_store_reasm_uo(struct sctp_ulpq *ulpq,
+				     struct sctp_ulpevent *event)
+{
+	struct sctp_ulpevent *cevent;
+	struct sk_buff *pos;
+
+	pos = skb_peek_tail(&ulpq->reasm_uo);
+	if (!pos) {
+		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
+		return;
+	}
+
+	cevent = sctp_skb2event(pos);
+
+	if (event->stream == cevent->stream &&
+	    event->mid == cevent->mid &&
+	    (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
+	     (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
+	      event->fsn > cevent->fsn))) {
+		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
+		return;
+	}
+
+	if ((event->stream == cevent->stream &&
+	     MID_lt(cevent->mid, event->mid)) ||
+	    event->stream > cevent->stream) {
+		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
+		return;
+	}
+
+	skb_queue_walk(&ulpq->reasm_uo, pos) {
+		cevent = sctp_skb2event(pos);
+
+		if (event->stream < cevent->stream ||
+		    (event->stream == cevent->stream &&
+		     MID_lt(event->mid, cevent->mid)))
+			break;
+
+		if (event->stream == cevent->stream &&
+		    event->mid == cevent->mid &&
+		    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
+		    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
+		     event->fsn < cevent->fsn))
+			break;
+	}
+
+	__skb_queue_before(&ulpq->reasm_uo, pos, sctp_event2skb(event));
+}
+
+static struct sctp_ulpevent *sctp_intl_retrieve_partial_uo(
+						struct sctp_ulpq *ulpq,
+						struct sctp_ulpevent *event)
+{
+	struct sk_buff *first_frag = NULL;
+	struct sk_buff *last_frag = NULL;
+	struct sctp_ulpevent *retval;
+	struct sctp_stream_in *sin;
+	struct sk_buff *pos;
+	__u32 next_fsn = 0;
+	int is_last = 0;
+
+	sin = sctp_stream_in(ulpq->asoc, event->stream);
+
+	skb_queue_walk(&ulpq->reasm_uo, pos) {
+		struct sctp_ulpevent *cevent = sctp_skb2event(pos);
+
+		if (cevent->stream < event->stream)
+			continue;
+		if (cevent->stream > event->stream)
+			break;
+
+		if (MID_lt(cevent->mid, sin->mid_uo))
+			continue;
+		if (MID_lt(sin->mid_uo, cevent->mid))
+			break;
+
+		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+		case SCTP_DATA_FIRST_FRAG:
+			goto out;
+		case SCTP_DATA_MIDDLE_FRAG:
+			if (!first_frag) {
+				if (cevent->fsn == sin->fsn_uo) {
+					first_frag = pos;
+					last_frag = pos;
+					next_fsn = cevent->fsn + 1;
+				}
+			} else if (cevent->fsn == next_fsn) {
+				last_frag = pos;
+				next_fsn++;
+			} else {
+				goto out;
+			}
+			break;
+		case SCTP_DATA_LAST_FRAG:
+			if (!first_frag) {
+				if (cevent->fsn == sin->fsn_uo) {
+					first_frag = pos;
+					last_frag = pos;
+					next_fsn = 0;
+					is_last = 1;
+				}
+			} else if (cevent->fsn == next_fsn) {
+				last_frag = pos;
+				next_fsn = 0;
+				is_last = 1;
+			}
+			goto out;
+		default:
+			goto out;
+		}
+	}
+
+out:
+	if (!first_frag)
+		return NULL;
+
+	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+					     &ulpq->reasm_uo, first_frag,
+					     last_frag);
+	if (retval) {
+		sin->fsn_uo = next_fsn;
+		if (is_last) {
+			retval->msg_flags |= MSG_EOR;
+			sin->pd_mode_uo = 0;
+		}
+	}
+
+	return retval;
+}
+
+static struct sctp_ulpevent *sctp_intl_retrieve_reassembled_uo(
+						struct sctp_ulpq *ulpq,
+						struct sctp_ulpevent *event)
+{
+	struct sctp_association *asoc = ulpq->asoc;
+	struct sk_buff *pos, *first_frag = NULL;
+	struct sctp_ulpevent *retval = NULL;
+	struct sk_buff *pd_first = NULL;
+	struct sk_buff *pd_last = NULL;
+	struct sctp_stream_in *sin;
+	__u32 next_fsn = 0;
+	__u32 pd_point = 0;
+	__u32 pd_len = 0;
+	__u32 mid = 0;
+
+	sin = sctp_stream_in(ulpq->asoc, event->stream);
+
+	skb_queue_walk(&ulpq->reasm_uo, pos) {
+		struct sctp_ulpevent *cevent = sctp_skb2event(pos);
+
+		if (cevent->stream < event->stream)
+			continue;
+		if (cevent->stream > event->stream)
+			break;
+
+		if (MID_lt(cevent->mid, event->mid))
+			continue;
+		if (MID_lt(event->mid, cevent->mid))
+			break;
+
+		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+		case SCTP_DATA_FIRST_FRAG:
+			if (!sin->pd_mode_uo) {
+				sin->mid_uo = cevent->mid;
+				pd_first = pos;
+				pd_last = pos;
+				pd_len = pos->len;
+			}
+
+			first_frag = pos;
+			next_fsn = 0;
+			mid = cevent->mid;
+			break;
+
+		case SCTP_DATA_MIDDLE_FRAG:
+			if (first_frag && cevent->mid == mid &&
+			    cevent->fsn == next_fsn) {
+				next_fsn++;
+				if (pd_first) {
+					pd_last = pos;
+					pd_len += pos->len;
+				}
+			} else {
+				first_frag = NULL;
+			}
+			break;
+
+		case SCTP_DATA_LAST_FRAG:
+			if (first_frag && cevent->mid == mid &&
+			    cevent->fsn == next_fsn)
+				goto found;
+			else
+				first_frag = NULL;
+			break;
+		}
+	}
+
+	if (!pd_first)
+		goto out;
+
+	pd_point = sctp_sk(asoc->base.sk)->pd_point;
+	if (pd_point && pd_point <= pd_len) {
+		retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
+						     &ulpq->reasm_uo,
+						     pd_first, pd_last);
+		if (retval) {
+			sin->fsn_uo = next_fsn;
+			sin->pd_mode_uo = 1;
+		}
+	}
+	goto out;
+
+found:
+	retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
+					     &ulpq->reasm_uo,
+					     first_frag, pos);
+	if (retval)
+		retval->msg_flags |= MSG_EOR;
+
+out:
+	return retval;
+}
+
+static struct sctp_ulpevent *sctp_intl_reasm_uo(struct sctp_ulpq *ulpq,
+						struct sctp_ulpevent *event)
+{
+	struct sctp_ulpevent *retval = NULL;
+	struct sctp_stream_in *sin;
+
+	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
+		event->msg_flags |= MSG_EOR;
+		return event;
+	}
+
+	sctp_intl_store_reasm_uo(ulpq, event);
+
+	sin = sctp_stream_in(ulpq->asoc, event->stream);
+	if (sin->pd_mode_uo && event->mid == sin->mid_uo &&
+	    event->fsn == sin->fsn_uo)
+		retval = sctp_intl_retrieve_partial_uo(ulpq, event);
+
+	if (!retval)
+		retval = sctp_intl_retrieve_reassembled_uo(ulpq, event);
+
+	return retval;
+}
+
+static struct sctp_ulpevent *sctp_intl_retrieve_first_uo(struct sctp_ulpq *ulpq)
+{
+	struct sctp_stream_in *csin, *sin = NULL;
+	struct sk_buff *first_frag = NULL;
+	struct sk_buff *last_frag = NULL;
+	struct sctp_ulpevent *retval;
+	struct sk_buff *pos;
+	__u32 next_fsn = 0;
+	__u16 sid = 0;
+
+	skb_queue_walk(&ulpq->reasm_uo, pos) {
+		struct sctp_ulpevent *cevent = sctp_skb2event(pos);
+
+		csin = sctp_stream_in(ulpq->asoc, cevent->stream);
+		if (csin->pd_mode_uo)
+			continue;
+
+		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+		case SCTP_DATA_FIRST_FRAG:
+			if (first_frag)
+				goto out;
+			first_frag = pos;
+			last_frag = pos;
+			next_fsn = 0;
+			sin = csin;
+			sid = cevent->stream;
+			sin->mid_uo = cevent->mid;
+			break;
+		case SCTP_DATA_MIDDLE_FRAG:
+			if (!first_frag)
+				break;
+			if (cevent->stream == sid &&
+			    cevent->mid == sin->mid_uo &&
+			    cevent->fsn == next_fsn) {
+				next_fsn++;
+				last_frag = pos;
+			} else {
+				goto out;
+			}
+			break;
+		case SCTP_DATA_LAST_FRAG:
+			if (first_frag)
+				goto out;
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (!first_frag)
+		return NULL;
+
+out:
+	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+					     &ulpq->reasm_uo, first_frag,
+					     last_frag);
+	if (retval) {
+		sin->fsn_uo = next_fsn;
+		sin->pd_mode_uo = 1;
+	}
+
+	return retval;
+}
+
 static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,
 			       struct sctp_chunk *chunk, gfp_t gfp)
 {
@@ -529,12 +841,16 @@ static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,
 	else
 		event->fsn = ntohl(chunk->subh.idata_hdr->fsn);
 
-	event = sctp_intl_reasm(ulpq, event);
-	if (event && event->msg_flags & MSG_EOR) {
-		skb_queue_head_init(&temp);
-		__skb_queue_tail(&temp, sctp_event2skb(event));
+	if (!(event->msg_flags & SCTP_DATA_UNORDERED)) {
+		event = sctp_intl_reasm(ulpq, event);
+		if (event && event->msg_flags & MSG_EOR) {
+			skb_queue_head_init(&temp);
+			__skb_queue_tail(&temp, sctp_event2skb(event));
 
-		event = sctp_intl_order(ulpq, event);
+			event = sctp_intl_order(ulpq, event);
+		}
+	} else {
+		event = sctp_intl_reasm_uo(ulpq, event);
 	}
 
 	if (event) {
@@ -614,14 +930,21 @@ static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
 {
 	struct sctp_ulpevent *event;
 
-	if (skb_queue_empty(&ulpq->reasm))
-		return;
+	if (!skb_queue_empty(&ulpq->reasm)) {
+		do {
+			event = sctp_intl_retrieve_first(ulpq);
+			if (event)
+				sctp_enqueue_event(ulpq, event);
+		} while (event);
+	}
 
-	do {
-		event = sctp_intl_retrieve_first(ulpq);
-		if (event)
-			sctp_enqueue_event(ulpq, event);
-	} while (event);
+	if (!skb_queue_empty(&ulpq->reasm_uo)) {
+		do {
+			event = sctp_intl_retrieve_first_uo(ulpq);
+			if (event)
+				sctp_enqueue_event(ulpq, event);
+		} while (event);
+	}
 }
 
 static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
@@ -643,6 +966,9 @@ static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 		if (freed < needed)
 			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm,
 						       needed);
+		if (freed < needed)
+			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm_uo,
+						       needed);
 	}
 
 	if (chunk && freed >= needed)
@@ -734,6 +1060,13 @@ static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
 		struct sctp_stream_in *sin = &stream->in[sid];
 		__u32 mid;
 
+		if (sin->pd_mode_uo) {
+			sin->pd_mode_uo = 0;
+
+			mid = sin->mid_uo;
+			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1, gfp);
+		}
+
 		if (sin->pd_mode) {
 			sin->pd_mode = 0;
 
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index dd53daab4a25..97fae53310e0 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -60,6 +60,7 @@ struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
 
 	ulpq->asoc = asoc;
 	skb_queue_head_init(&ulpq->reasm);
+	skb_queue_head_init(&ulpq->reasm_uo);
 	skb_queue_head_init(&ulpq->lobby);
 	ulpq->pd_mode = 0;
 
@@ -83,6 +84,10 @@ void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
 		sctp_ulpevent_free(event);
 	}
 
+	while ((skb = __skb_dequeue(&ulpq->reasm_uo)) != NULL) {
+		event = sctp_skb2event(skb);
+		sctp_ulpevent_free(event);
+	}
 }
 
 /* Dispose of a ulpqueue. */