author	Lee A. Roberts <lee.roberts@hp.com>	2013-02-27 23:37:30 -0500
committer	David S. Miller <davem@davemloft.net>	2013-02-28 15:34:27 -0500
commit	d003b41b801124b96337973b01eada6a83673d23 (patch)
tree	726fb6ef8bff813011a19098dd374ce1707edae4 /net/sctp
parent	95ac7b859f508b1b3e6adf7dce307864e4384a69 (diff)
sctp: fix association hangs due to partial delivery errors
In sctp_ulpq_tail_data(), use return values 0 and 1 to indicate whether a complete event (with MSG_EOR set) was delivered. A return value of -ENOMEM continues to indicate that an out-of-memory condition was encountered.

In sctp_ulpq_retrieve_partial() and sctp_ulpq_retrieve_first(), correct the message reassembly logic for SCTP partial delivery: ensure that as much data as possible is sent with the initial partial delivery and that subsequent partial deliveries contain all available data.

In sctp_ulpq_partial_delivery(), attempt partial delivery only if the data on the head of the reassembly queue is at or before the cumulative TSN ACK point.

In sctp_ulpq_renege(), use the modified return values from sctp_ulpq_tail_data() to choose whether to attempt partial delivery or to drain the reassembly queue as a means of reducing memory pressure. Remove the call to sctp_tsnmap_mark(), as this is handled correctly by the call to sctp_ulpq_tail_data().

Signed-off-by: Lee A. Roberts <lee.roberts@hp.com>
Acked-by: Vlad Yasevich <vyasevich@gmail.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
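For readers following the renege change below, here is a minimal userspace C sketch of the decision this patch introduces. The helper names model the sctp_ulpq_tail_data()/sctp_ulpq_renege() contract described above but are hypothetical stand-ins, not the kernel API:

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for sctp_ulpq_tail_data(): returns 1 when a
 * complete event (MSG_EOR set) was delivered to the ULP, 0 when only
 * a fragment was queued, and -ENOMEM on allocation failure.
 */
static int tail_data(int delivered_complete_event)
{
	return delivered_complete_event ? 1 : 0;
}

/* Mirrors the branch added to sctp_ulpq_renege(): undelivered data
 * (or an error) triggers partial delivery; a completed message means
 * the reassembly queue can simply be drained.
 */
static void renege_decision(int retval)
{
	if (retval <= 0)
		printf("retval=%2d -> attempt partial delivery\n", retval);
	else
		printf("retval=%2d -> drain reassembly queue\n", retval);
}

int main(void)
{
	renege_decision(tail_data(0)); /* fragment queued, no MSG_EOR */
	renege_decision(tail_data(1)); /* complete event delivered    */
	renege_decision(-ENOMEM);      /* out-of-memory condition     */
	return 0;
}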
Diffstat (limited to 'net/sctp')
-rw-r--r--	net/sctp/ulpqueue.c	54
1 file changed, 43 insertions(+), 11 deletions(-)
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index f221fbbc80ac..0fd5b3d2df03 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -106,6 +106,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 {
 	struct sk_buff_head temp;
 	struct sctp_ulpevent *event;
+	int event_eor = 0;
 
 	/* Create an event from the incoming chunk. */
 	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
@@ -127,10 +128,12 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 	/* Send event to the ULP. 'event' is the sctp_ulpevent for
 	 * very first SKB on the 'temp' list.
 	 */
-	if (event)
+	if (event) {
+		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
 		sctp_ulpq_tail_event(ulpq, event);
+	}
 
-	return 0;
+	return event_eor;
 }
 
 /* Add a new event for propagation to the ULP. */
@@ -540,14 +543,19 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
 		ctsn = cevent->tsn;
 
 		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+		case SCTP_DATA_FIRST_FRAG:
+			if (!first_frag)
+				return NULL;
+			goto done;
 		case SCTP_DATA_MIDDLE_FRAG:
 			if (!first_frag) {
 				first_frag = pos;
 				next_tsn = ctsn + 1;
 				last_frag = pos;
-			} else if (next_tsn == ctsn)
+			} else if (next_tsn == ctsn) {
 				next_tsn++;
-			else
+				last_frag = pos;
+			} else
 				goto done;
 			break;
 		case SCTP_DATA_LAST_FRAG:
@@ -651,6 +659,14 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
 			} else
 				goto done;
 			break;
+
+		case SCTP_DATA_LAST_FRAG:
+			if (!first_frag)
+				return NULL;
+			else
+				goto done;
+			break;
+
 		default:
 			return NULL;
 		}
@@ -1025,16 +1041,28 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
 	struct sctp_ulpevent *event;
 	struct sctp_association *asoc;
 	struct sctp_sock *sp;
+	__u32 ctsn;
+	struct sk_buff *skb;
 
 	asoc = ulpq->asoc;
 	sp = sctp_sk(asoc->base.sk);
 
 	/* If the association is already in Partial Delivery mode
-	 * we have noting to do.
+	 * we have nothing to do.
 	 */
 	if (ulpq->pd_mode)
 		return;
 
+	/* Data must be at or below the Cumulative TSN ACK Point to
+	 * start partial delivery.
+	 */
+	skb = skb_peek(&asoc->ulpq.reasm);
+	if (skb != NULL) {
+		ctsn = sctp_skb2event(skb)->tsn;
+		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
+			return;
+	}
+
 	/* If the user enabled fragment interleave socket option,
 	 * multiple associations can enter partial delivery.
 	 * Otherwise, we can only enter partial delivery if the
@@ -1077,12 +1105,16 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 	}
 	/* If able to free enough room, accept this chunk. */
 	if (chunk && (freed >= needed)) {
-		__u32 tsn;
-		tsn = ntohl(chunk->subh.data_hdr->tsn);
-		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport);
-		sctp_ulpq_tail_data(ulpq, chunk, gfp);
-
-		sctp_ulpq_partial_delivery(ulpq, gfp);
+		int retval;
+		retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
+		/*
+		 * Enter partial delivery if chunk has not been
+		 * delivered; otherwise, drain the reassembly queue.
+		 */
+		if (retval <= 0)
+			sctp_ulpq_partial_delivery(ulpq, gfp);
+		else if (retval == 1)
+			sctp_ulpq_reasm_drain(ulpq);
 	}
 
 	sk_mem_reclaim(asoc->base.sk);
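The gate added in sctp_ulpq_partial_delivery() relies on TSN_lte(), the kernel's wraparound-safe serial-number comparison for 32-bit TSNs. A minimal userspace sketch of the same comparison, assuming the standard (s32)(a - b) <= 0 idiom the kernel macro uses:

#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "a is at or before b" for 32-bit TSNs, modeled on
 * the kernel's TSN_lte() macro: the signed difference handles
 * sequence numbers that wrap past 2^32 - 1.
 */
static int tsn_lte(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) <= 0;
}

int main(void)
{
	printf("%d\n", tsn_lte(3, 5));           /* 1: 3 is at or before 5 */
	printf("%d\n", tsn_lte(7, 7));           /* 1: equal               */
	printf("%d\n", tsn_lte(0xfffffffeu, 1)); /* 1: wrapped, still lte  */
	printf("%d\n", tsn_lte(1, 0xfffffffeu)); /* 0: 1 is after the wrap */
	return 0;
}

Under this model, the !TSN_lte(ctsn, ...) check in the hunk above returns early whenever the head of the reassembly queue lies beyond the cumulative TSN ACK point, so partial delivery starts only with data at or below that point.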