| author | Pavel Emelyanov <xemul@openvz.org> | 2007-10-23 23:30:25 -0400 |
|---|---|---|
| committer | David S. Miller <davem@sunset.davemloft.net> | 2007-10-24 00:27:52 -0400 |
| commit | 16d14ef9f29dfa9b1d99f3eff860e9f15bc99f39 (patch) | |
| tree | 8f13d343e807b79a23706d10d291f62f5a0a958f /net/sctp/ulpqueue.c | |
| parent | 5c58298c2536252ab95aa2b1497ab47eb878ca5d (diff) | |
[SCTP]: Consolidate sctp_ulpq_renege_xxx functions
Both functions are identical except for the list they traverse.
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Acked-by: Vlad Yasevich <vladislav.yasevich@hp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
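The pattern the patch applies is the usual one for this kind of duplication: when two functions differ only in the data structure they walk, hoist that structure into a parameter. Below is a minimal, self-contained C sketch of the same refactoring outside the kernel tree; the `struct buf` / `struct buf_head` types and the `dequeue_tail()` helper are hypothetical stand-ins for the kernel's `sk_buff`, `sk_buff_head`, and `__skb_dequeue_tail()`, and the names `renege_*` only mirror the functions touched by the patch.

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for sk_buff / sk_buff_head. */
struct buf {
	struct buf *prev;	/* next-older entry in the queue */
	unsigned short len;
};

struct buf_head {
	struct buf *tail;	/* newest entry */
};

/* Pop the newest entry, or NULL if the queue is empty
 * (the role __skb_dequeue_tail() plays in the patch). */
static struct buf *dequeue_tail(struct buf_head *q)
{
	struct buf *b = q->tail;

	if (b)
		q->tail = b->prev;
	return b;
}

/* The consolidated helper: walk 'list' from the tail, freeing the
 * newest entries until at least 'needed' bytes are reclaimed.
 * This mirrors the shape of sctp_ulpq_renege_list(). */
static unsigned short renege_list(struct buf_head *list,
				  unsigned short needed)
{
	unsigned short freed = 0;
	struct buf *b;

	while ((b = dequeue_tail(list)) != NULL) {
		freed += b->len;
		free(b);
		if (freed >= needed)
			break;
	}
	return freed;
}

/* The two former near-duplicates shrink to one-line wrappers that
 * differ only in the queue they pass down. */
static unsigned short renege_order(struct buf_head *lobby,
				   unsigned short needed)
{
	return renege_list(lobby, needed);
}

static unsigned short renege_frags(struct buf_head *reasm,
				   unsigned short needed)
{
	return renege_list(reasm, needed);
}

int main(void)
{
	struct buf_head lobby = { 0 };
	struct buf *a = calloc(1, sizeof(*a));
	struct buf *b = calloc(1, sizeof(*b));

	a->len = 100;
	b->len = 200;
	b->prev = a;
	lobby.tail = b;	/* b is the newest entry */

	/* b (200 bytes) is reclaimed first; 200 >= 150, so a survives. */
	printf("freed %hu bytes\n", renege_order(&lobby, 150));
	free(a);	/* a was never renege'd */
	return 0;
}
```

In the patch itself the wrappers keep the original `sctp_ulpq` signatures, so every caller is untouched and only the list pointer moves into the shared helper.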
Diffstat (limited to 'net/sctp/ulpqueue.c')
-rw-r--r-- | net/sctp/ulpqueue.c | 34 ++++++++++------------------------
1 file changed, 10 insertions(+), 24 deletions(-)
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index b9370956b187..4be92d0a2cab 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -908,8 +908,8 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 	return;
 }
 
-/* Renege 'needed' bytes from the ordering queue. */
-static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
+static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
+		struct sk_buff_head *list, __u16 needed)
 {
 	__u16 freed = 0;
 	__u32 tsn;
@@ -919,7 +919,7 @@ static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
 
 	tsnmap = &ulpq->asoc->peer.tsn_map;
 
-	while ((skb = __skb_dequeue_tail(&ulpq->lobby)) != NULL) {
+	while ((skb = __skb_dequeue_tail(list)) != NULL) {
 		freed += skb_headlen(skb);
 		event = sctp_skb2event(skb);
 		tsn = event->tsn;
@@ -933,30 +933,16 @@ static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
 	return freed;
 }
 
+/* Renege 'needed' bytes from the ordering queue. */
+static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
+{
+	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
+}
+
 /* Renege 'needed' bytes from the reassembly queue. */
 static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
 {
-	__u16 freed = 0;
-	__u32 tsn;
-	struct sk_buff *skb;
-	struct sctp_ulpevent *event;
-	struct sctp_tsnmap *tsnmap;
-
-	tsnmap = &ulpq->asoc->peer.tsn_map;
-
-	/* Walk backwards through the list, reneges the newest tsns. */
-	while ((skb = __skb_dequeue_tail(&ulpq->reasm)) != NULL) {
-		freed += skb_headlen(skb);
-		event = sctp_skb2event(skb);
-		tsn = event->tsn;
-
-		sctp_ulpevent_free(event);
-		sctp_tsnmap_renege(tsnmap, tsn);
-		if (freed >= needed)
-			return freed;
-	}
-
-	return freed;
+	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
 }
 
 /* Partial deliver the first message as there is pressure on rwnd. */