path: root/net/sctp/ulpqueue.c
author    Vlad Yasevich <vladislav.yasevich@hp.com>  2008-01-15 11:41:56 -0500
committer Vlad Yasevich <vladislav.yasevich@hp.com>  2008-02-06 21:26:26 -0500
commit    c068be5491924c1c1c37dc046f36976c27bc7bb2 (patch)
tree      05212f5704d2c1aa66629ac20ac312d0af39eca1 /net/sctp/ulpqueue.c
parent    01f2d38498957e967cd6f6011a6b208393957b4a (diff)
[SCTP]: Correctly reap SSNs when processing FORWARD_TSN chunk
When we receive a FORWARD_TSN chunk, we need to reap all the queued fast-forwarded chunks from the ordering queue. However, if we don't have them queued, we need to see if the next expected one is there as well. If it is, start delivering from that point instead of waiting for the next chunk to arrive.

Signed-off-by: Vlad Yasevich <vladislav.yasevich@hp.com>
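A minimal user-space C sketch of the decision this patch changes. Everything in it (toy_event, ssn_lt, the example lobby) is illustrative and not kernel code; only the two-step logic mirrors the patched sctp_ulpq_reap_ordered(): first reap every queued event the FORWARD_TSN already skipped past, and if that reaped nothing, deliver the head of the lobby when it is exactly the next expected SSN.

/*
 * Illustrative model only -- toy types and helpers, not kernel code.
 */
#include <stdio.h>

struct toy_event {
	unsigned short sid;	/* stream id */
	unsigned short ssn;	/* stream sequence number */
};

/* Serial-number "less than" with wraparound, like the kernel's SSN_lt(). */
static int ssn_lt(unsigned short a, unsigned short b)
{
	return (short)(a - b) < 0;
}

int main(void)
{
	/* Lobby of out-of-order events for stream 1, sorted by SSN. */
	struct toy_event lobby[] = { {1, 5}, {1, 6}, {1, 9} };
	const size_t n = sizeof(lobby) / sizeof(lobby[0]);
	unsigned short sid = 1;		/* stream named by the FORWARD_TSN */
	unsigned short expected = 5;	/* next expected SSN after the skip */
	size_t i = 0;
	int reaped = 0;

	/* Step 1: reap everything the FORWARD_TSN has already skipped past. */
	while (i < n && lobby[i].sid == sid && ssn_lt(lobby[i].ssn, expected)) {
		printf("deliver skipped SSN %u\n", lobby[i].ssn);
		i++;
		reaped = 1;
	}

	/* Step 2 (the behaviour this patch adds): nothing was reaped, but
	 * the head of the lobby is exactly the next expected SSN, so
	 * deliver it now and advance the expectation instead of waiting
	 * for another chunk to arrive.
	 */
	if (!reaped && i < n && lobby[i].sid == sid && lobby[i].ssn == expected) {
		printf("deliver next expected SSN %u\n", lobby[i].ssn);
		expected++;
		i++;
	}

	printf("next expected SSN is now %u\n", expected);
	return 0;
}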
Diffstat (limited to 'net/sctp/ulpqueue.c')
-rw-r--r--  net/sctp/ulpqueue.c  23
1 file changed, 20 insertions, 3 deletions
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index d300f4973a79..5061a26c5028 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -874,6 +874,7 @@ static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
 	struct sctp_ulpevent *event;
 	struct sctp_stream *in;
 	struct sk_buff_head temp;
+	struct sk_buff_head *lobby = &ulpq->lobby;
 	__u16 csid, cssn;
 
 	in = &ulpq->asoc->ssnmap->in;
@@ -881,7 +882,7 @@ static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
 	/* We are holding the chunks by stream, by SSN. */
 	skb_queue_head_init(&temp);
 	event = NULL;
-	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
+	sctp_skb_for_each(pos, lobby, tmp) {
 		cevent = (struct sctp_ulpevent *) pos->cb;
 		csid = cevent->stream;
 		cssn = cevent->ssn;
@@ -895,10 +896,10 @@ static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
 			continue;
 
 		/* see if this ssn has been marked by skipping */
-		if (!SSN_lte(cssn, sctp_ssn_peek(in, csid)))
+		if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
 			break;
 
-		__skb_unlink(pos, &ulpq->lobby);
+		__skb_unlink(pos, lobby);
 		if (!event)
 			/* Create a temporary list to collect chunks on. */
 			event = sctp_skb2event(pos);
@@ -907,6 +908,22 @@ static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
 		__skb_queue_tail(&temp, pos);
 	}
 
+	/* If we didn't reap any data, see if the next expected SSN
+	 * is next on the queue and if so, use that.
+	 */
+	if (event == NULL && pos != (struct sk_buff *)lobby) {
+		cevent = (struct sctp_ulpevent *) pos->cb;
+		csid = cevent->stream;
+		cssn = cevent->ssn;
+
+		if (csid == sid && cssn == sctp_ssn_peek(in, csid)) {
+			sctp_ssn_next(in, csid);
+			__skb_unlink(pos, lobby);
+			__skb_queue_tail(&temp, pos);
+			event = sctp_skb2event(pos);
+		}
+	}
+
 	/* Send event to the ULP. 'event' is the sctp_ulpevent for
 	 * very first SKB on the 'temp' list.
 	 */