Diffstat (limited to 'net/sctp/ulpqueue.c')
-rw-r--r--   net/sctp/ulpqueue.c | 87
1 file changed, 71 insertions(+), 16 deletions(-)
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index ada17464b65b..0fd5b3d2df03 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -106,6 +106,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 {
 	struct sk_buff_head temp;
 	struct sctp_ulpevent *event;
+	int event_eor = 0;
 
 	/* Create an event from the incoming chunk. */
 	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
@@ -127,10 +128,12 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 	/* Send event to the ULP.  'event' is the sctp_ulpevent for
 	 * very first SKB on the 'temp' list.
 	 */
-	if (event)
+	if (event) {
+		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
 		sctp_ulpq_tail_event(ulpq, event);
+	}
 
-	return 0;
+	return event_eor;
 }
 
 /* Add a new event for propagation to the ULP. */
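Note on the two hunks above: sctp_ulpq_tail_data() now folds the delivered event's MSG_EOR flag into its return value, so callers can tell whether a complete message reached the socket queue (1), only a fragment was buffered (0), or allocation failed (negative). A stand-alone user-space sketch of that convention (model code with hypothetical names, not the kernel API):

#include <stdio.h>
#include <sys/socket.h>		/* MSG_EOR */

struct model_event { int msg_flags; };

/* Mirrors the new return convention: 1 if a complete message
 * (MSG_EOR set) was pushed to the socket, 0 if no event reached
 * the ULP yet (e.g. the chunk is still sitting in reassembly).
 */
static int model_tail_data(const struct model_event *event)
{
	int event_eor = 0;

	if (event)
		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
	return event_eor;
}

int main(void)
{
	const struct model_event whole = { .msg_flags = MSG_EOR };

	printf("%d\n", model_tail_data(&whole));	/* 1: complete message */
	printf("%d\n", model_tail_data(NULL));		/* 0: nothing delivered */
	return 0;
}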
@@ -540,14 +543,19 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
 		ctsn = cevent->tsn;
 
 		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+		case SCTP_DATA_FIRST_FRAG:
+			if (!first_frag)
+				return NULL;
+			goto done;
 		case SCTP_DATA_MIDDLE_FRAG:
 			if (!first_frag) {
 				first_frag = pos;
 				next_tsn = ctsn + 1;
 				last_frag = pos;
-			} else if (next_tsn == ctsn)
+			} else if (next_tsn == ctsn) {
 				next_tsn++;
-			else
+				last_frag = pos;
+			} else
 				goto done;
 			break;
 		case SCTP_DATA_LAST_FRAG:
@@ -651,6 +659,14 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
 			} else
 				goto done;
 			break;
+
+		case SCTP_DATA_LAST_FRAG:
+			if (!first_frag)
+				return NULL;
+			else
+				goto done;
+			break;
+
 		default:
 			return NULL;
 		}
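The two hunks above harden the reassembly-queue walks: sctp_ulpq_retrieve_partial() now stops when it meets a SCTP_DATA_FIRST_FRAG (a new message is starting), and sctp_ulpq_retrieve_first() stops when it meets a SCTP_DATA_LAST_FRAG with no first fragment in hand, instead of mis-joining fragments of different messages. A simplified user-space model of such a scan (an assumed simplification for illustration, not the kernel logic verbatim):

#include <assert.h>

enum frag { FIRST_FRAG, MIDDLE_FRAG, LAST_FRAG };

struct frag_ev {
	unsigned int tsn;
	enum frag type;
};

/* A run is usable only while TSNs stay consecutive and no fragment
 * of a *different* message (an out-of-place FIRST or LAST) is hit.
 * Returns the length of a complete FIRST..LAST run at the head of
 * the queue, or 0 if none exists; bailing out with 0 models the
 * new "return NULL / goto done" cases above.
 */
static int run_length(const struct frag_ev *q, int n)
{
	unsigned int next_tsn = 0;
	int i;

	for (i = 0; i < n; i++) {
		switch (q[i].type) {
		case FIRST_FRAG:
			if (i != 0)
				return 0;	/* another message starts */
			next_tsn = q[0].tsn + 1;
			break;
		case MIDDLE_FRAG:
			if (i == 0 || q[i].tsn != next_tsn)
				return 0;	/* no FIRST, or TSN gap */
			next_tsn++;
			break;
		case LAST_FRAG:
			if (i == 0 || q[i].tsn != next_tsn)
				return 0;
			return i + 1;		/* complete run */
		}
	}
	return 0;				/* LAST_FRAG never seen */
}

int main(void)
{
	const struct frag_ev ok[]  = { {10, FIRST_FRAG},
				       {11, MIDDLE_FRAG},
				       {12, LAST_FRAG} };
	const struct frag_ev gap[] = { {10, FIRST_FRAG},
				       {12, LAST_FRAG} };

	assert(run_length(ok, 3) == 3);
	assert(run_length(gap, 2) == 0);
	return 0;
}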
@@ -962,20 +978,43 @@ static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
 		struct sk_buff_head *list, __u16 needed)
 {
 	__u16 freed = 0;
-	__u32 tsn;
-	struct sk_buff *skb;
+	__u32 tsn, last_tsn;
+	struct sk_buff *skb, *flist, *last;
 	struct sctp_ulpevent *event;
 	struct sctp_tsnmap *tsnmap;
 
 	tsnmap = &ulpq->asoc->peer.tsn_map;
 
-	while ((skb = __skb_dequeue_tail(list)) != NULL) {
-		freed += skb_headlen(skb);
+	while ((skb = skb_peek_tail(list)) != NULL) {
 		event = sctp_skb2event(skb);
 		tsn = event->tsn;
 
+		/* Don't renege below the Cumulative TSN ACK Point. */
+		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
+			break;
+
+		/* Events in ordering queue may have multiple fragments
+		 * corresponding to additional TSNs.  Sum the total
+		 * freed space; find the last TSN.
+		 */
+		freed += skb_headlen(skb);
+		flist = skb_shinfo(skb)->frag_list;
+		for (last = flist; flist; flist = flist->next) {
+			last = flist;
+			freed += skb_headlen(last);
+		}
+		if (last)
+			last_tsn = sctp_skb2event(last)->tsn;
+		else
+			last_tsn = tsn;
+
+		/* Unlink the event, then renege all applicable TSNs. */
+		__skb_unlink(skb, list);
 		sctp_ulpevent_free(event);
-		sctp_tsnmap_renege(tsnmap, tsn);
+		while (TSN_lte(tsn, last_tsn)) {
+			sctp_tsnmap_renege(tsnmap, tsn);
+			tsn++;
+		}
 		if (freed >= needed)
 			return freed;
 	}
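A reassembled event on the ordering queue can carry extra fragments chained on skb_shinfo(skb)->frag_list, each with its own TSN; the loop above totals their space and remembers the last TSN so the whole range can be reneged, not just the head. A stand-alone model of that walk (illustrative struct, not struct sk_buff):

#include <stddef.h>
#include <stdio.h>

struct buf {				/* models one skb in the chain */
	size_t len;
	unsigned int tsn;
	struct buf *next;		/* models the frag_list link */
};

/* Sum the head buffer plus every chained fragment; report the last
 * fragment's TSN (or the head's own TSN if the chain is empty),
 * mirroring the freed/last_tsn computation in the hunk above.
 */
static size_t sum_and_find_last(struct buf *head, unsigned int *last_tsn)
{
	struct buf *flist = head->next;
	struct buf *last;
	size_t freed = head->len;

	for (last = flist; flist; flist = flist->next) {
		last = flist;
		freed += last->len;
	}
	*last_tsn = last ? last->tsn : head->tsn;
	return freed;
}

int main(void)
{
	struct buf c = { 100, 12, NULL };
	struct buf b = { 200, 11, &c };
	struct buf a = { 300, 10, &b };
	unsigned int last_tsn;
	size_t freed = sum_and_find_last(&a, &last_tsn);

	printf("freed=%zu last_tsn=%u\n", freed, last_tsn);	/* 600, 12 */
	return 0;
}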
@@ -1002,16 +1041,28 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
 	struct sctp_ulpevent *event;
 	struct sctp_association *asoc;
 	struct sctp_sock *sp;
+	__u32 ctsn;
+	struct sk_buff *skb;
 
 	asoc = ulpq->asoc;
 	sp = sctp_sk(asoc->base.sk);
 
 	/* If the association is already in Partial Delivery mode
-	 * we have noting to do.
+	 * we have nothing to do.
 	 */
 	if (ulpq->pd_mode)
 		return;
 
+	/* Data must be at or below the Cumulative TSN ACK Point to
+	 * start partial delivery.
+	 */
+	skb = skb_peek(&asoc->ulpq.reasm);
+	if (skb != NULL) {
+		ctsn = sctp_skb2event(skb)->tsn;
+		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
+			return;
+	}
+
 	/* If the user enabled fragment interleave socket option,
 	 * multiple associations can enter partial delivery.
 	 * Otherwise, we can only enter partial delivery if the
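The new guards here and in the renege hunk compare TSNs with TSN_lte(), the kernel's wrap-safe serial-number comparison, so ordering stays correct when the 32-bit TSN space rolls over. A minimal stand-alone version of the same idiom:

#include <stdint.h>
#include <assert.h>

/* Serial-number "a <= b": correct across 32-bit wraparound because
 * the unsigned difference is reinterpreted as signed.
 */
static int tsn_lte(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) <= 0;
}

int main(void)
{
	assert(tsn_lte(5, 5));
	assert(tsn_lte(5, 6));
	/* 0xffffffff counts as "before" 0 once the TSN space wraps. */
	assert(tsn_lte(0xffffffffu, 0));
	assert(!tsn_lte(0, 0xffffffffu));
	return 0;
}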
@@ -1054,12 +1105,16 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 	}
 	/* If able to free enough room, accept this chunk. */
 	if (chunk && (freed >= needed)) {
-		__u32 tsn;
-		tsn = ntohl(chunk->subh.data_hdr->tsn);
-		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport);
-		sctp_ulpq_tail_data(ulpq, chunk, gfp);
-
-		sctp_ulpq_partial_delivery(ulpq, gfp);
+		int retval;
+		retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
+		/*
+		 * Enter partial delivery if chunk has not been
+		 * delivered; otherwise, drain the reassembly queue.
+		 */
+		if (retval <= 0)
+			sctp_ulpq_partial_delivery(ulpq, gfp);
+		else if (retval == 1)
+			sctp_ulpq_reasm_drain(ulpq);
 	}
 
 	sk_mem_reclaim(asoc->base.sk);
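Finally, sctp_ulpq_renege() now branches on the return value introduced in the first hunk: if no complete message was delivered it tries to start partial delivery, and if one was it drains the reassembly queue instead. A stub-based sketch of that dispatch (the stub functions stand in for the kernel helpers, they are not the kernel API):

#include <stdio.h>

static void start_partial_delivery(void) { puts("partial delivery"); }
static void drain_reassembly_queue(void) { puts("drain reasm"); }

/* Dispatch on the tail_data result, as in the hunk above:
 *   <= 0  nothing fully delivered (or error) -> try partial delivery
 *   == 1  a complete message went up -> flush what reassembly allows
 */
static void after_accepting_chunk(int retval)
{
	if (retval <= 0)
		start_partial_delivery();
	else if (retval == 1)
		drain_reassembly_queue();
}

int main(void)
{
	after_accepting_chunk(0);	/* fragment queued */
	after_accepting_chunk(1);	/* complete message delivered */
	return 0;
}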