author    Vlad Yasevich <vladislav.yasevich@hp.com>    2007-07-13 17:01:19 -0400
committer Vlad Yasevich <vladislav.yasevich@hp.com>    2007-08-29 13:34:33 -0400
commit    ea2dfb3733d53ac98b17756435d1f99e25490357 (patch)
tree      9a70c036bcf1ed57a059efa245cbb63f300db0c5 /net/sctp
parent    b07d68b5ca4d55a16fab223d63d5fb36f89ff42f (diff)
SCTP: properly clean up fragment and ordering queues during FWD-TSN.
When we receive a FWD-TSN (meaning the peer has abandoned the data), we need to clean up any partially received messages that may be hanging out on the re-assembly or re-ordering queues. This is a MUST requirement that was not properly done before.

Signed-off-by: Vlad Yasevich <vladislav.yasevich@hp.com>
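Note: the flush added by this patch hinges on wrap-safe TSN comparison. TSNs are 32-bit serial numbers, so "earlier than or equal to the new cumulative TSN point" has to be decided modulo 2^32 rather than with a plain <=. The following user-space sketch illustrates that comparison and the purge it drives; tsn_lte() and flush_upto() are illustrative stand-ins for the kernel's TSN_lte() helper and the sk_buff walk in sctp_ulpq_reasm_flushtsn(), not the patched code itself.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Wrap-safe serial-number comparison, same idea as the kernel's
 * TSN_lte(): "a" is at or before "b" on the 32-bit circle when the
 * two are equal or the difference (a - b) has its sign bit set.
 */
static int tsn_lte(uint32_t a, uint32_t b)
{
	return (a == b) || (((a - b) & 0x80000000u) != 0);
}

/* Toy stand-in for the reassembly queue: an array of fragment TSNs.
 * Drop every fragment at or before the Forward-TSN cumulative point
 * and return how many fragments survive.
 */
static size_t flush_upto(uint32_t *tsns, size_t n, uint32_t fwd_tsn)
{
	size_t kept = 0;

	for (size_t i = 0; i < n; i++)
		if (!tsn_lte(tsns[i], fwd_tsn))
			tsns[kept++] = tsns[i];
	return kept;
}

int main(void)
{
	/* Fragments straddling a 32-bit wrap; the cumulative point wraps too. */
	uint32_t frags[] = { 0xfffffffeu, 0xffffffffu, 0x0u, 0x1u, 0x2u };
	size_t kept = flush_upto(frags, 5, 0x0u);

	for (size_t i = 0; i < kept; i++)
		printf("kept TSN %u\n", (unsigned)frags[i]);	/* 1 and 2 survive */
	return 0;
}

The real reassembly queue is kept in TSN order, which is why the kernel routine below can stop at the first surviving fragment instead of scanning the whole list.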
Diffstat (limited to 'net/sctp')
-rw-r--r--  net/sctp/sm_sideeffect.c   3
-rw-r--r--  net/sctp/ulpqueue.c       75
2 files changed, 65 insertions(+), 13 deletions(-)
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index d9fad4f6ffc3..1907318e70f1 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1130,6 +1130,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 		/* Move the Cumulattive TSN Ack ahead. */
 		sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);
 
+		/* purge the fragmentation queue */
+		sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32);
+
 		/* Abort any in progress partial delivery. */
 		sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
 		break;
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 34eb977a204d..fa0ba2a5564e 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -659,6 +659,46 @@ done:
 	return retval;
 }
 
+/*
+ * Flush out stale fragments from the reassembly queue when processing
+ * a Forward TSN.
+ *
+ * RFC 3758, Section 3.6
+ *
+ * After receiving and processing a FORWARD TSN, the data receiver MUST
+ * take cautions in updating its re-assembly queue.  The receiver MUST
+ * remove any partially reassembled message, which is still missing one
+ * or more TSNs earlier than or equal to the new cumulative TSN point.
+ * In the event that the receiver has invoked the partial delivery API,
+ * a notification SHOULD also be generated to inform the upper layer API
+ * that the message being partially delivered will NOT be completed.
+ */
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
+{
+	struct sk_buff *pos, *tmp;
+	struct sctp_ulpevent *event;
+	__u32 tsn;
+
+	if (skb_queue_empty(&ulpq->reasm))
+		return;
+
+	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
+		event = sctp_skb2event(pos);
+		tsn = event->tsn;
+
+		/* Since the entire message must be abandoned by the
+		 * sender (item A3 in Section 3.5, RFC 3758), we can
+		 * free all fragments on the list that are less then
+		 * or equal to ctsn_point
+		 */
+		if (TSN_lte(tsn, fwd_tsn)) {
+			__skb_unlink(pos, &ulpq->reasm);
+			sctp_ulpevent_free(event);
+		} else
+			break;
+	}
+}
+
 /* Helper function to gather skbs that have possibly become
  * ordered by an an incoming chunk.
  */
@@ -794,7 +834,7 @@ static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
 /* Helper function to gather skbs that have possibly become
  * ordered by forward tsn skipping their dependencies.
  */
-static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
+static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
 {
 	struct sk_buff *pos, *tmp;
 	struct sctp_ulpevent *cevent;
@@ -813,31 +853,40 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
 		csid = cevent->stream;
 		cssn = cevent->ssn;
 
-		if (cssn != sctp_ssn_peek(in, csid))
+		/* Have we gone too far?  */
+		if (csid > sid)
 			break;
 
-		/* Found it, so mark in the ssnmap. */
-		sctp_ssn_next(in, csid);
+		/* Have we not gone far enough?  */
+		if (csid < sid)
+			continue;
+
+		/* see if this ssn has been marked by skipping */
+		if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
+			break;
 
 		__skb_unlink(pos, &ulpq->lobby);
-		if (!event) {
+		if (!event)
 			/* Create a temporary list to collect chunks on.  */
 			event = sctp_skb2event(pos);
-			__skb_queue_tail(&temp, sctp_event2skb(event));
-		} else {
-			/* Attach all gathered skbs to the event.  */
-			__skb_queue_tail(&temp, pos);
-		}
+
+		/* Attach all gathered skbs to the event.  */
+		__skb_queue_tail(&temp, pos);
 	}
 
 	/* Send event to the ULP.  'event' is the sctp_ulpevent for
 	 * very first SKB on the 'temp' list.
 	 */
-	if (event)
+	if (event) {
+		/* see if we have more ordered that we can deliver */
+		sctp_ulpq_retrieve_ordered(ulpq, event);
 		sctp_ulpq_tail_event(ulpq, event);
+	}
 }
 
-/* Skip over an SSN. */
+/* Skip over an SSN. This is used during the processing of
+ * Forwared TSN chunk to skip over the abandoned ordered data
+ */
 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 {
 	struct sctp_stream *in;
@@ -855,7 +904,7 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 	/* Go find any other chunks that were waiting for
 	 * ordering and deliver them if needed.
 	 */
-	sctp_ulpq_reap_ordered(ulpq);
+	sctp_ulpq_reap_ordered(ulpq, sid);
 	return;
 }
 
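The ordering-queue side works the same way with 16-bit SSNs: once the Forward TSN has skipped a stream up to some SSN, anything parked in the lobby at or below that point can be released to the ULP. The sketch below models that skip-then-reap step; ssn_lt(), struct toy_stream and skip_and_reap() are simplified stand-ins for SSN_lt(), the per-stream ssnmap and the sctp_ulpq_skip()/sctp_ulpq_reap_ordered() pair, not the kernel implementation.

#include <stdio.h>
#include <stdint.h>

/* Wrap-safe 16-bit comparison in the spirit of the kernel's SSN_lt(). */
static int ssn_lt(uint16_t a, uint16_t b)
{
	return ((uint16_t)(a - b) & 0x8000u) != 0;
}

/* Toy per-stream reorder state: the next expected SSN plus a small
 * queue of SSNs still waiting for ordering (the "lobby").
 */
struct toy_stream {
	uint16_t next_ssn;
	uint16_t lobby[8];
	int lobby_len;
};

/* Model of the skip-then-reap step: advance the expected SSN past the
 * abandoned range, then release whatever the skip made deliverable.
 */
static void skip_and_reap(struct toy_stream *s, uint16_t skipped_ssn)
{
	int kept = 0;

	if (!ssn_lt(skipped_ssn, s->next_ssn))
		s->next_ssn = (uint16_t)(skipped_ssn + 1);

	for (int i = 0; i < s->lobby_len; i++) {
		if (ssn_lt(s->lobby[i], s->next_ssn))
			printf("deliver SSN %u\n", (unsigned)s->lobby[i]);
		else
			s->lobby[kept++] = s->lobby[i];
	}
	s->lobby_len = kept;
}

int main(void)
{
	struct toy_stream s = { .next_ssn = 5, .lobby = { 6, 7, 9 }, .lobby_len = 3 };

	/* The peer abandons everything up to SSN 7: 6 and 7 are released,
	 * 9 keeps waiting for 8.
	 */
	skip_and_reap(&s, 7);
	return 0;
}

Restricting the reap to a single stream is also why the patched sctp_ulpq_reap_ordered() gains a sid argument: only the stream that was just skipped can have newly deliverable chunks.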