Diffstat (limited to 'net/sctp/ulpqueue.c')
-rw-r--r--  net/sctp/ulpqueue.c  |  168
1 file changed, 138 insertions(+), 30 deletions(-)
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index f4759a9bdaee..34eb977a204d 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -73,7 +73,7 @@ struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
 
 
 /* Flush the reassembly and ordering queues. */
-static void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
+void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
 {
 	struct sk_buff *skb;
 	struct sctp_ulpevent *event;
@@ -138,26 +138,59 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 /* Clear the partial delivery mode for this socket. Note: This
  * assumes that no association is currently in partial delivery mode.
  */
-int sctp_clear_pd(struct sock *sk)
+int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
 {
 	struct sctp_sock *sp = sctp_sk(sk);
 
-	sp->pd_mode = 0;
-	if (!skb_queue_empty(&sp->pd_lobby)) {
-		struct list_head *list;
-		sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
-		list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
-		INIT_LIST_HEAD(list);
-		return 1;
+	if (atomic_dec_and_test(&sp->pd_mode)) {
+		/* This means there are no other associations in PD, so
+		 * we can go ahead and clear out the lobby in one shot
+		 */
+		if (!skb_queue_empty(&sp->pd_lobby)) {
+			struct list_head *list;
+			sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
+			list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
+			INIT_LIST_HEAD(list);
+			return 1;
+		}
+	} else {
+		/* There are other associations in PD, so we only need to
+		 * pull stuff out of the lobby that belongs to the
+		 * associations that is exiting PD (all of its notifications
+		 * are posted here).
+		 */
+		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
+			struct sk_buff *skb, *tmp;
+			struct sctp_ulpevent *event;
+
+			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
+				event = sctp_skb2event(skb);
+				if (event->asoc == asoc) {
+					__skb_unlink(skb, &sp->pd_lobby);
+					__skb_queue_tail(&sk->sk_receive_queue,
+							 skb);
+				}
+			}
+		}
 	}
+
 	return 0;
 }
 
+/* Set the pd_mode on the socket and ulpq */
+static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
+{
+	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);
+
+	atomic_inc(&sp->pd_mode);
+	ulpq->pd_mode = 1;
+}
+
 /* Clear the pd_mode and restart any pending messages waiting for delivery. */
 static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
 {
 	ulpq->pd_mode = 0;
-	return sctp_clear_pd(ulpq->asoc->base.sk);
+	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
 }
 
 /* If the SKB of 'event' is on a list, it is the first such member
@@ -187,18 +220,35 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 	 * the association the cause of the partial delivery.
 	 */
 
-	if (!sctp_sk(sk)->pd_mode) {
+	if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
 		queue = &sk->sk_receive_queue;
-	} else if (ulpq->pd_mode) {
-		if (event->msg_flags & MSG_NOTIFICATION)
-			queue = &sctp_sk(sk)->pd_lobby;
-		else {
-			clear_pd = event->msg_flags & MSG_EOR;
-			queue = &sk->sk_receive_queue;
+	} else {
+		if (ulpq->pd_mode) {
+			/* If the association is in partial delivery, we
+			 * need to finish delivering the partially processed
+			 * packet before passing any other data. This is
+			 * because we don't truly support stream interleaving.
+			 */
+			if ((event->msg_flags & MSG_NOTIFICATION) ||
+			    (SCTP_DATA_NOT_FRAG ==
+			     (event->msg_flags & SCTP_DATA_FRAG_MASK)))
+				queue = &sctp_sk(sk)->pd_lobby;
+			else {
+				clear_pd = event->msg_flags & MSG_EOR;
+				queue = &sk->sk_receive_queue;
+			}
+		} else {
+			/*
+			 * If fragment interleave is enabled, we
+			 * can queue this to the recieve queue instead
+			 * of the lobby.
+			 */
+			if (sctp_sk(sk)->frag_interleave)
+				queue = &sk->sk_receive_queue;
+			else
+				queue = &sctp_sk(sk)->pd_lobby;
 		}
-	} else
-		queue = &sctp_sk(sk)->pd_lobby;
-
+	}
 
 	/* If we are harvesting multiple skbs they will be
 	 * collected on a list.
@@ -341,7 +391,7 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *qu
 			break;
 		pos->next = pnext;
 		pos = pnext;
-	};
+	}
 
 	event = sctp_skb2event(f_frag);
 	SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS);
@@ -360,6 +410,11 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u
 	struct sk_buff *first_frag = NULL;
 	__u32 ctsn, next_tsn;
 	struct sctp_ulpevent *retval = NULL;
+	struct sk_buff *pd_first = NULL;
+	struct sk_buff *pd_last = NULL;
+	size_t pd_len = 0;
+	struct sctp_association *asoc;
+	u32 pd_point;
 
 	/* Initialized to 0 just to avoid compiler warning message. Will
 	 * never be used with this value. It is referenced only after it
@@ -375,6 +430,10 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u
 	 * we expect to find the remaining middle fragments and the last
 	 * fragment in order. If not, first_frag is reset to NULL and we
 	 * start the next pass when we find another first fragment.
+	 *
+	 * There is a potential to do partial delivery if user sets
+	 * SCTP_PARTIAL_DELIVERY_POINT option. Lets count some things here
+	 * to see if can do PD.
 	 */
 	skb_queue_walk(&ulpq->reasm, pos) {
 		cevent = sctp_skb2event(pos);
@@ -382,14 +441,32 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u
 
 		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
 		case SCTP_DATA_FIRST_FRAG:
+			/* If this "FIRST_FRAG" is the first
+			 * element in the queue, then count it towards
+			 * possible PD.
+			 */
+			if (pos == ulpq->reasm.next) {
+				pd_first = pos;
+				pd_last = pos;
+				pd_len = pos->len;
+			} else {
+				pd_first = NULL;
+				pd_last = NULL;
+				pd_len = 0;
+			}
+
 			first_frag = pos;
 			next_tsn = ctsn + 1;
 			break;
 
 		case SCTP_DATA_MIDDLE_FRAG:
-			if ((first_frag) && (ctsn == next_tsn))
+			if ((first_frag) && (ctsn == next_tsn)) {
 				next_tsn++;
-			else
+				if (pd_first) {
+					pd_last = pos;
+					pd_len += pos->len;
+				}
+			} else
 				first_frag = NULL;
 			break;
 
@@ -399,8 +476,29 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u
 			else
 				first_frag = NULL;
 			break;
-		};
+		}
+	}
 
+	asoc = ulpq->asoc;
+	if (pd_first) {
+		/* Make sure we can enter partial deliver.
+		 * We can trigger partial delivery only if framgent
+		 * interleave is set, or the socket is not already
+		 * in partial delivery.
+		 */
+		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
+		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
+			goto done;
+
+		cevent = sctp_skb2event(pd_first);
+		pd_point = sctp_sk(asoc->base.sk)->pd_point;
+		if (pd_point && pd_point <= pd_len) {
+			retval = sctp_make_reassembled_event(&ulpq->reasm,
+							     pd_first,
+							     pd_last);
+			if (retval)
+				sctp_ulpq_set_pd(ulpq);
+		}
 	}
 done:
 	return retval;
@@ -458,7 +556,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq
 			goto done;
 		default:
 			return NULL;
-		};
+		}
 	}
463 561
464 /* We have the reassembled event. There is no need to look 562 /* We have the reassembled event. There is no need to look
@@ -550,7 +648,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *u
 			break;
 		default:
 			return NULL;
-		};
+		}
 	}
 
 	/* We have the reassembled event. There is no need to look
@@ -819,19 +917,29 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
 {
 	struct sctp_ulpevent *event;
 	struct sctp_association *asoc;
+	struct sctp_sock *sp;
 
 	asoc = ulpq->asoc;
+	sp = sctp_sk(asoc->base.sk);
 
-	/* Are we already in partial delivery mode? */
-	if (!sctp_sk(asoc->base.sk)->pd_mode) {
+	/* If the association is already in Partial Delivery mode
+	 * we have noting to do.
+	 */
+	if (ulpq->pd_mode)
+		return;
 
+	/* If the user enabled fragment interleave socket option,
+	 * multiple associations can enter partial delivery.
+	 * Otherwise, we can only enter partial delivery if the
+	 * socket is not in partial deliver mode.
+	 */
+	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
 		/* Is partial delivery possible? */
 		event = sctp_ulpq_retrieve_first(ulpq);
 		/* Send event to the ULP. */
 		if (event) {
 			sctp_ulpq_tail_event(ulpq, event);
-			sctp_sk(asoc->base.sk)->pd_mode = 1;
-			ulpq->pd_mode = 1;
+			sctp_ulpq_set_pd(ulpq);
 			return;
 		}
 	}
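
The two tunables this patch keys off, the partial delivery point (sp->pd_point) and fragment interleave (sp->frag_interleave), are driven from userspace through socket options. As a rough illustration only: the sketch below assumes the lksctp-tools <netinet/sctp.h> header and the RFC 6458 option names SCTP_PARTIAL_DELIVERY_POINT and SCTP_FRAGMENT_INTERLEAVE; it is not part of this patch.

/* Hedged userspace sketch: enable fragment interleave and set a partial
 * delivery point on a one-to-many SCTP socket. Option names and semantics
 * are assumed from RFC 6458 / lksctp-tools, not taken from this kernel patch.
 */
#include <stdio.h>
#include <stdint.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Let more than one association be in partial delivery at once
	 * (the sp->frag_interleave checks in the hunks above).
	 */
	int interleave = 1;
	if (setsockopt(fd, IPPROTO_SCTP, SCTP_FRAGMENT_INTERLEAVE,
		       &interleave, sizeof(interleave)) < 0)
		perror("SCTP_FRAGMENT_INTERLEAVE");

	/* Start partial delivery once this many bytes of a single message
	 * have been reassembled (compared against pd_len via sp->pd_point).
	 */
	uint32_t pd_point = 8192;
	if (setsockopt(fd, IPPROTO_SCTP, SCTP_PARTIAL_DELIVERY_POINT,
		       &pd_point, sizeof(pd_point)) < 0)
		perror("SCTP_PARTIAL_DELIVERY_POINT");

	return 0;
}

A receiver that opts in this way should keep reading until MSG_EOR shows up in msg_flags, since a partially delivered message arrives across several recvmsg() calls; that is the same MSG_EOR test sctp_ulpq_tail_event() uses above to decide when to clear partial delivery.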