author		Vlad Yasevich <vladislav.yasevich@hp.com>	2007-12-16 17:05:45 -0500
committer	David S. Miller <davem@davemloft.net>	2007-12-16 17:05:45 -0500
commit		ef5d4cf2f9aae4e09883d2d664e367a16b47d857 (patch)
tree		b6e83cf4f8b975c8bec0045c8a74d11496524f10 /net/sctp/ulpqueue.c
parent		215f7b08f2a142ec19f4bd3d6de263e68b877955 (diff)
[SCTP]: Flush fragment queue when exiting partial delivery.
At the end of partial delivery, we may have complete messages sitting on
the fragment queue.  These messages are stuck there until a new fragment
arrives, which can completely stall a given association.  When clearing
partial delivery state, flush any complete messages from the fragment
queue and send them on their way up.

Signed-off-by: Vlad Yasevich <vladislav.yasevich@hp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sctp/ulpqueue.c')
-rw-r--r--	net/sctp/ulpqueue.c | 33 ++++++++++++++++++++++++++++++++++
 1 file changed, 33 insertions(+), 0 deletions(-)
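
The stall the changelog describes is easy to model outside the kernel. The
sketch below is a minimal userspace illustration, not kernel code: struct
fragment, MSG_EOR_FLAG, retrieve_complete(), and drain_complete() are
hypothetical stand-ins for the SCTP reassembly queue, the MSG_EOR marker,
and the retrieve/drain helpers this patch touches. The point it shows is
that a complete (EOR-marked) message left on the queue is delivered only
when something explicitly drains it.

/*
 * Minimal userspace sketch of the drain-on-exit idea.  Not kernel code;
 * all names here are hypothetical illustrations.  Fragments accumulate
 * on a queue; a message is deliverable once its final (EOR) fragment
 * has arrived.  Without an explicit drain when partial delivery ends,
 * a complete message sits on the queue until the next unrelated
 * fragment happens to trigger another reassembly pass.
 */
#include <stdio.h>
#include <stdlib.h>

#define MSG_EOR_FLAG 0x1	/* hypothetical "end of record" marker */

struct fragment {
	int msg_flags;		/* MSG_EOR_FLAG set on the last fragment */
	const char *payload;
	struct fragment *next;
};

struct frag_queue {
	struct fragment *head;
};

/* Pop the head fragment if it completes a message, else leave it queued. */
static struct fragment *retrieve_complete(struct frag_queue *q)
{
	struct fragment *f = q->head;

	if (f && (f->msg_flags & MSG_EOR_FLAG)) {
		q->head = f->next;
		return f;
	}
	return NULL;
}

/* Mirror of the patch's intent: on leaving partial delivery, hand every
 * already-complete message to the user instead of waiting for new data.
 */
static void drain_complete(struct frag_queue *q)
{
	struct fragment *f;

	while ((f = retrieve_complete(q)) != NULL) {
		printf("delivered: %s\n", f->payload);
		free(f);
	}
}

int main(void)
{
	struct frag_queue q = { NULL };
	struct fragment *f = malloc(sizeof(*f));

	f->msg_flags = MSG_EOR_FLAG;	/* a fully reassembled message */
	f->payload = "complete message that would otherwise be stuck";
	f->next = NULL;
	q.head = f;

	/* Exiting partial delivery: flush instead of stalling. */
	drain_complete(&q);
	return 0;
}

Under this model, skipping the drain_complete() call leaves the complete
message queued indefinitely; that is the association stall the real patch
removes by draining the reassembly queue from sctp_ulpq_clear_pd().
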
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 4908041ffb31..1733fa29a501 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -53,6 +53,7 @@ static struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
 					      struct sctp_ulpevent *);
 static struct sctp_ulpevent * sctp_ulpq_order(struct sctp_ulpq *,
 					      struct sctp_ulpevent *);
+static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);
 
 /* 1st Level Abstractions */
 
@@ -190,6 +191,7 @@ static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
 static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
 {
 	ulpq->pd_mode = 0;
+	sctp_ulpq_reasm_drain(ulpq);
 	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
 }
 
@@ -699,6 +701,37 @@ void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
 	}
 }
 
+/*
+ * Drain the reassembly queue.  If we just cleared partial delivery, it
+ * is possible that the reassembly queue will contain already reassembled
+ * messages.  Retrieve any such messages and give them to the user.
+ */
+static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
+{
+	struct sctp_ulpevent *event = NULL;
+	struct sk_buff_head temp;
+
+	if (skb_queue_empty(&ulpq->reasm))
+		return;
+
+	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
+		/* Do ordering if needed. */
+		if (event && (event->msg_flags & MSG_EOR)) {
+			skb_queue_head_init(&temp);
+			__skb_queue_tail(&temp, sctp_event2skb(event));
+
+			event = sctp_ulpq_order(ulpq, event);
+		}
+
+		/* Send event to the ULP.  'event' is the
+		 * sctp_ulpevent for the very first SKB on the 'temp' list.
+		 */
+		if (event)
+			sctp_ulpq_tail_event(ulpq, event);
+	}
+}
+
+
 /* Helper function to gather skbs that have possibly become
  * ordered by an incoming chunk.
  */